diff --git a/.github/workflows-doc.md b/.github/workflows-doc.md
index 5d245f99a3..a4f406a962 100644
--- a/.github/workflows-doc.md
+++ b/.github/workflows-doc.md
@@ -69,7 +69,7 @@ As of October 2020, GitHub Actions do not persist between different jobs in the
       - name: Compressing Images
         run: tar -zcvf images.tar.gz /tmp/images
       - name: Cache Images
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         with:
           name: Docker Images
           path: ./images.tar.gz
diff --git a/.github/workflows/build-image.yml b/.github/workflows/build-image.yml
new file mode 100644
index 0000000000..3eb6312a18
--- /dev/null
+++ b/.github/workflows/build-image.yml
@@ -0,0 +1,68 @@
+name: Build Image
+
+on:
+  push:
+    branches: [ master ]
+    paths:
+      - 'build-image/**'
+      - '.github/workflows/build-image.yml'
+  pull_request:
+    branches: [ master ]
+    paths:
+      - 'build-image/**'
+      - '.github/workflows/build-image.yml'
+
+jobs:
+  build:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+        name: Checkout
+        with:
+          fetch-depth: 0
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Save image
+        run: make save-multiarch-build-image
+
+      - name: Upload Docker Images Artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: build-image
+          path: |
+            ./build-image-amd64.tar
+            ./build-image-arm64.tar
+          if-no-files-found: error
+
+  push:
+    needs: build
+    if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex'
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+        name: Checkout
+        with:
+          fetch-depth: 0
+
+      - name: Download Docker Images Artifacts
+        uses: actions/download-artifact@v4
+        with:
+          name: build-image
+
+      - name: Load image
+        run: make load-multiarch-build-image
+
+      - name: Login to Quay.io
+        uses: docker/login-action@v3
+        with:
+          registry: quay.io
+          username: ${{secrets.QUAY_REGISTRY_USER}}
+          password: ${{secrets.QUAY_REGISTRY_PASSWORD}}
+
+      - name: Push image
+        run: make push-multiarch-build-image
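Both files above lean on artifacts as the only way to hand files between jobs: `build-image.yml` saves the image tars in its `build` job and re-loads them in `push`. A minimal sketch of just the consuming side (the `use-image` job name is hypothetical; the artifact and file names match the workflow above):

```yaml
jobs:
  use-image:
    needs: build
    runs-on: ubuntu-24.04
    steps:
      # Pull the tars uploaded by the build job into this runner's workdir.
      - name: Download Docker Images Artifacts
        uses: actions/download-artifact@v4
        with:
          name: build-image
      # Load them into the local Docker daemon before tagging or pushing.
      - name: Load images
        run: |
          docker load -i build-image-amd64.tar
          docker load -i build-image-arm64.tar
```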
diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml
index dfa752c596..6775f69715 100644
--- a/.github/workflows/test-build-deploy.yml
+++ b/.github/workflows/test-build-deploy.yml
@@ -5,13 +5,19 @@ on:
     branches: [master]
     tags:
       - v[0-9]+.[0-9]+.[0-9]+** # Tag filters not as strict due to different regex system on Github Actions
+    paths-ignore:
+      - 'build-image/**'
+      - '.github/workflows/build-image.yml'
   pull_request:
+    paths-ignore:
+      - 'build-image/**'
+      - '.github/workflows/build-image.yml'

 jobs:
   lint:
     runs-on: ubuntu-20.04
     container:
-      image: quay.io/cortexproject/build-image:node_version_upgrade-60582e680
+      image: quay.io/cortexproject/build-image:master-6dd64fcce
     steps:
     - name: Checkout Repo
       uses: actions/checkout@v2
@@ -40,7 +46,7 @@ jobs:
   test:
     runs-on: ubuntu-20.04
     container:
-      image: quay.io/cortexproject/build-image:node_version_upgrade-60582e680
+      image: quay.io/cortexproject/build-image:master-6dd64fcce
     steps:
     - name: Checkout Repo
       uses: actions/checkout@v2
@@ -54,7 +60,10 @@ jobs:
        mkdir -p /go/src/github.com/cortexproject/cortex
        ln -s $GITHUB_WORKSPACE/* /go/src/github.com/cortexproject/cortex
    - name: Run Tests
-      run: make BUILD_IN_CONTAINER=false test
+      run: |
+        for run in $(seq 1 100); do
+          make BUILD_IN_CONTAINER=false test || true
+        done

   security:
     name: CodeQL
@@ -83,7 +92,7 @@ jobs:
   build:
     runs-on: ubuntu-20.04
     container:
-      image: quay.io/cortexproject/build-image:node_version_upgrade-60582e680
+      image: quay.io/cortexproject/build-image:master-6dd64fcce
     steps:
     - name: Checkout Repo
       uses: actions/checkout@v2
@@ -107,7 +116,7 @@ jobs:
        touch build-image/.uptodate
        make BUILD_IN_CONTAINER=false web-build
    - name: Upload Website Artifact
-      uses: actions/upload-artifact@v2
+      uses: actions/upload-artifact@v4
      with:
        name: website public
        path: website/public/
@@ -119,7 +128,7 @@ jobs:
    - name: Create Docker Images Archive
      run: tar -cvf images.tar /tmp/images
    - name: Upload Docker Images Artifact
-      uses: actions/upload-artifact@v2
+      uses: actions/upload-artifact@v4
      with:
        name: Docker Images
        path: ./images.tar
@@ -152,7 +161,7 @@ jobs:
        sudo mkdir -p /go/src/github.com/cortexproject/cortex
        sudo ln -s $GITHUB_WORKSPACE/* /go/src/github.com/cortexproject/cortex
    - name: Download Docker Images Artifacts
-      uses: actions/download-artifact@v2
+      uses: actions/download-artifact@v4
      with:
        name: Docker Images
    - name: Extract Docker Images Archive
@@ -194,7 +203,9 @@ jobs:
        export CORTEX_IMAGE="${CORTEX_IMAGE_PREFIX}cortex:$IMAGE_TAG-amd64"
        export CORTEX_CHECKOUT_DIR="/go/src/github.com/cortexproject/cortex"
        echo "Running integration tests with image: $CORTEX_IMAGE"
-        go test -tags=integration,${{ matrix.tags }} -timeout 2400s -v -count=1 ./integration/...
+        for run in $(seq 1 100); do
+          go test -tags=integration,${{ matrix.tags }} -timeout 2400s -v -count=1 -failfast ./integration/... || true
+        done
      env:
        IMAGE_PREFIX: ${{ secrets.IMAGE_PREFIX }}
@@ -207,7 +218,7 @@ jobs:
    - name: Install Docker Client
      run: sudo ./.github/workflows/scripts/install-docker.sh
    - name: Download Docker Images Artifact
-      uses: actions/download-artifact@v2
+      uses: actions/download-artifact@v4
      with:
        name: Docker Images
    - name: Extract Docker Images Archive
@@ -217,14 +228,14 @@ jobs:
      run: |
        touch build-image/.uptodate
        MIGRATIONS_DIR=$(pwd)/cmd/cortex/migrations
-       make BUILD_IMAGE=quay.io/cortexproject/build-image:node_version_upgrade-60582e680 TTY='' configs-integration-test
+       make BUILD_IMAGE=quay.io/cortexproject/build-image:master-6dd64fcce TTY='' configs-integration-test

   deploy_website:
     needs: [build, test]
     if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex'
     runs-on: ubuntu-20.04
     container:
-      image: quay.io/cortexproject/build-image:node_version_upgrade-60582e680
+      image: quay.io/cortexproject/build-image:master-6dd64fcce
     steps:
     - name: Checkout Repo
       uses: actions/checkout@v2
@@ -241,7 +252,7 @@ jobs:
        mkdir -p /go/src/github.com/cortexproject/cortex
        ln -s $GITHUB_WORKSPACE/* /go/src/github.com/cortexproject/cortex
    - name: Download Website Artifact
-      uses: actions/download-artifact@v2
+      uses: actions/download-artifact@v4
      with:
        name: website public
        path: website/public
@@ -266,7 +277,7 @@ jobs:
     if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex'
     runs-on: ubuntu-20.04
     container:
-      image: quay.io/cortexproject/build-image:node_version_upgrade-60582e680
+      image: quay.io/cortexproject/build-image:master-6dd64fcce
     steps:
     - name: Checkout Repo
       uses: actions/checkout@v2
@@ -282,7 +293,7 @@ jobs:
        mkdir -p /go/src/github.com/cortexproject/cortex
        ln -s $GITHUB_WORKSPACE/* /go/src/github.com/cortexproject/cortex
    - name: Download Docker Images Artifact
-      uses: actions/download-artifact@v2
+      uses: actions/download-artifact@v4
      with:
        name: Docker Images
    - name: Extract Docker Images Archive
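The `paths-ignore` entries added here are the exact complement of the `paths` filter in the new `build-image.yml`: a push that only touches the build image skips this pipeline, while any other change skips the image build. Condensed side by side as a two-document YAML stream (each `on:` block lives in its own workflow file):

```yaml
# .github/workflows/build-image.yml — runs only when the build image changes.
on:
  push:
    paths:
      - 'build-image/**'
      - '.github/workflows/build-image.yml'
---
# .github/workflows/test-build-deploy.yml — runs unless only those paths changed.
on:
  push:
    paths-ignore:
      - 'build-image/**'
      - '.github/workflows/build-image.yml'
```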
diff --git a/.gitignore b/.gitignore
index ac025027c1..5749235286 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,3 +27,6 @@ Makefile.local
 .vscode
 compose
 compose-simple
+
+/build-image-arm64.tar
+/build-image-amd64.tar
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ab44924ca6..8328ea318a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,7 @@
 ## master / unreleased
 * [CHANGE] Upgrade Dockerfile Node version from 14x to 18x. #5906
 * [CHANGE] Ingester: Remove `-querier.query-store-for-labels-enabled` flag. Querying long-term store for labels is always enabled. #5984
+* [FEATURE] Ingester: Experimental: Enable native histogram ingestion via `-blocks-storage.tsdb.enable-native-histograms` flag. #5986
 * [ENHANCEMENT] rulers: Add support to persist tokens in rulers. #5987
 * [ENHANCEMENT] Query Frontend/Querier: Added store gateway postings touched count and touched size in Querier stats and log in Query Frontend. #5892
 * [ENHANCEMENT] Query Frontend/Querier: Returns `warnings` on prometheus query responses. #5916
@@ -11,7 +12,7 @@
 * [ENHANCEMENT] Distributor/Querier: Clean stale per-ingester metrics after ingester restarts. #5930
 * [ENHANCEMENT] Distributor/Ring: Allow disabling detailed ring metrics by ring member. #5931
 * [ENHANCEMENT] KV: Etcd Added etcd.ping-without-stream-allowed parameter to disable/enable PermitWithoutStream #5933
-* [ENHANCEMENT] Ingester: Add a new `max_series_per_label_set` limit. This limit functions similarly to `max_series_per_metric`, but allowing users to define the maximum number of series per LabelSet. #5950
+* [ENHANCEMENT] Ingester: Add a new `limits_per_label_set` limit. This limit functions similarly to `max_series_per_metric`, but allows users to define the maximum number of series per LabelSet. #5950 #5993
 * [ENHANCEMENT] Store Gateway: Log gRPC requests together with headers configured in `http_request_headers_to_log`. #5958
 * [BUGFIX] Configsdb: Fix endline issue in db password. #5920
 * [BUGFIX] Ingester: Fix `user` and `type` labels for the `cortex_ingester_tsdb_head_samples_appended_total` TSDB metric. #5952
diff --git a/Makefile b/Makefile
index 7c0c10a337..762d3b669e 100644
--- a/Makefile
+++ b/Makefile
@@ -53,14 +53,7 @@ fetch-build-image:
 	docker tag $(BUILD_IMAGE):$(LATEST_BUILD_IMAGE_TAG) $(BUILD_IMAGE):latest
 	touch build-image/.uptodate

-push-multiarch-build-image:
-	@echo
-	# Build image for each platform separately... it tends to generate fewer errors.
-	$(SUDO) docker buildx build --platform linux/amd64 --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) build-image/
-	$(SUDO) docker buildx build --platform linux/arm64 --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) build-image/
-	# This command will run the same build as above, but it will reuse existing platform-specific images,
-	# put them together and push to registry.
-	$(SUDO) docker buildx build -o type=registry --platform linux/amd64,linux/arm64 --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) -t $(IMAGE_PREFIX)build-image:$(IMAGE_TAG) build-image/
+-include build-image/Makefile

 # We don't want find to scan inside a bunch of directories, to accelerate the
 # 'make: Entering directory '/go/src/github.com/cortexproject/cortex' phase.
@@ -122,7 +115,7 @@ build-image/$(UPTODATE): build-image/*
 SUDO := $(shell docker info >/dev/null 2>&1 || echo "sudo -E")
 BUILD_IN_CONTAINER := true
 BUILD_IMAGE ?= $(IMAGE_PREFIX)build-image
-LATEST_BUILD_IMAGE_TAG ?= node_version_upgrade-60582e680
+LATEST_BUILD_IMAGE_TAG ?= master-6dd64fcce

 # TTY is parameterized to allow Google Cloud Builder to run builds,
 # as it currently disallows TTY devices. This value needs to be overridden
@@ -223,7 +216,7 @@ lint:
 		./pkg/ruler/...

 test:
-	go test -tags netgo -timeout 30m -race -count 1 ./...
+	go test -tags netgo -timeout 30m -race -failfast -count 1 ./...

 cover:
 	$(eval COVERDIR := $(shell mktemp -d coverage.XXXXXXXXXX))
diff --git a/build-image/Makefile b/build-image/Makefile
new file mode 100644
index 0000000000..9844b681ab
--- /dev/null
+++ b/build-image/Makefile
@@ -0,0 +1,17 @@
+save-multiarch-build-image:
+	@echo
+	# Build image for each platform separately... it tends to generate fewer errors.
+	$(SUDO) docker buildx build --platform linux/amd64 --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) -t $(IMAGE_PREFIX)build-image:$(IMAGE_TAG)-amd64 --output type=docker,dest=./build-image-amd64.tar build-image/
+	$(SUDO) docker buildx build --platform linux/arm64 --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) -t $(IMAGE_PREFIX)build-image:$(IMAGE_TAG)-arm64 --output type=docker,dest=./build-image-arm64.tar build-image/
+
+load-multiarch-build-image:
+	$(SUDO) docker load -i build-image-amd64.tar
+	$(SUDO) docker load -i build-image-arm64.tar
+
+push-multiarch-build-image:
+	# This command will run the same build as multiarch-build-image, but it will reuse existing platform-specific images,
+	# put them together and push to registry.
+	$(SUDO) docker push $(IMAGE_PREFIX)build-image:${IMAGE_TAG}-amd64
+	$(SUDO) docker push $(IMAGE_PREFIX)build-image:${IMAGE_TAG}-arm64
+	$(SUDO) docker manifest create $(IMAGE_PREFIX)build-image:$(IMAGE_TAG) --amend $(IMAGE_PREFIX)build-image:${IMAGE_TAG}-amd64 --amend $(IMAGE_PREFIX)build-image:${IMAGE_TAG}-arm64
+	$(SUDO) docker manifest push $(IMAGE_PREFIX)build-image:$(IMAGE_TAG)
diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md
index 68c12be2ce..1ca05c1b05 100644
--- a/docs/blocks-storage/querier.md
+++ b/docs/blocks-storage/querier.md
@@ -1441,4 +1441,8 @@ blocks_storage:
       # be out-of-order.
       # CLI flag: -blocks-storage.tsdb.out-of-order-cap-max
       [out_of_order_cap_max: <int> | default = 32]
+
+      # [EXPERIMENTAL] True to enable native histogram.
+      # CLI flag: -blocks-storage.tsdb.enable-native-histograms
+      [enable_native_histograms: <boolean> | default = false]
 ```
diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md
index bb88559db5..4b7e504fa4 100644
--- a/docs/blocks-storage/store-gateway.md
+++ b/docs/blocks-storage/store-gateway.md
@@ -1566,4 +1566,8 @@ blocks_storage:
       # be out-of-order.
       # CLI flag: -blocks-storage.tsdb.out-of-order-cap-max
       [out_of_order_cap_max: <int> | default = 32]
+
+      # [EXPERIMENTAL] True to enable native histogram.
+      # CLI flag: -blocks-storage.tsdb.enable-native-histograms
+      [enable_native_histograms: <boolean> | default = false]
 ```
diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md
index 33ed426d6d..ab0570c67e 100644
--- a/docs/configuration/config-file-reference.md
+++ b/docs/configuration/config-file-reference.md
@@ -1998,6 +1998,10 @@ tsdb:
   # be out-of-order.
   # CLI flag: -blocks-storage.tsdb.out-of-order-cap-max
   [out_of_order_cap_max: <int> | default = 32]
+
+  # [EXPERIMENTAL] True to enable native histogram.
+  # CLI flag: -blocks-storage.tsdb.enable-native-histograms
+  [enable_native_histograms: <boolean> | default = false]
 ```

 ### `compactor_config`
@@ -3172,9 +3176,9 @@ The `limits_config` configures default and per-tenant limits imposed by Cortex s
 # CLI flag: -ingester.max-global-series-per-metric
 [max_global_series_per_metric: <int> | default = 0]

-# [Experimental] The maximum number of active series per LabelSet, across the
-# cluster before replication. Empty list to disable.
-[max_series_per_label_set: <list of MaxSeriesPerLabelSet> | default = []]
+# [Experimental] Enable limits per LabelSet. Supported limits per labelSet:
+# [max_series]
+[limits_per_label_set: <list of LimitsPerLabelSet> | default = []]

 # The maximum number of active metrics with metadata per user, per ingester. 0
 # to disable.
@@ -5314,11 +5318,14 @@ otel:
   [tls_insecure_skip_verify: <boolean> | default = false]
 ```

-### `MaxSeriesPerLabelSet`
+### `LimitsPerLabelSet`

 ```yaml
-# The maximum number of active series per LabelSet before replication.
-[limit: <int> | default = ]
+limits:
+  # The maximum number of active series per LabelSet, across the cluster before
+  # replication. Setting the value 0 will enable the monitoring (metrics) but
+  # would not enforce any limits.
+  [max_series: <int> | default = ]

 # LabelSet which the limit should be applied.
 [label_set: <map of string to string> | default = []]
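Putting the renamed `limits_per_label_set` structure together with the `LimitsPerLabelSet` reference above, a per-tenant runtime override could look like the following sketch (the tenant name and numbers are invented for illustration):

```yaml
overrides:
  tenant-a:
    limits_per_label_set:
      - limits:
          # 0 would only expose the per-label-set metrics without enforcing a cap.
          max_series: 100000
        label_set:
          namespace: "prod"
      - limits:
          max_series: 10000
        label_set:
          namespace: "dev"
```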
diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md
index 65f99096b8..919c0033a8 100644
--- a/docs/configuration/v1-guarantees.md
+++ b/docs/configuration/v1-guarantees.md
@@ -111,3 +111,5 @@ Currently experimental features are:
 - OTLP Receiver
 - Persistent tokens in the Ruler Ring:
   - `-ruler.ring.tokens-file-path` (path) CLI flag
+- Native Histograms
+  - Ingestion can be enabled by setting `-blocks-storage.tsdb.enable-native-histograms=true` on Ingesters.
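Since `v1-guarantees.md` only names the CLI flag, here is the same switch in YAML form, matching the `blocks_storage` reference sections above (a minimal sketch; the feature stays experimental and off by default):

```yaml
blocks_storage:
  tsdb:
    # Equivalent to -blocks-storage.tsdb.enable-native-histograms=true on the ingester.
    enable_native_histograms: true
```

Note that a sender has to produce native histograms in the first place; for example, Prometheus remote write only sends them when started with `--enable-feature=native-histograms`.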
diff --git a/docs/contributing/how-to-update-the-build-image.md b/docs/contributing/how-to-update-the-build-image.md
index 497ab00a00..df0c62a2f1 100644
--- a/docs/contributing/how-to-update-the-build-image.md
+++ b/docs/contributing/how-to-update-the-build-image.md
@@ -5,12 +5,10 @@ weight: 5
 slug: how-to-update-the-build-image
 ---

-The build image currently can only be updated by a Cortex maintainer. If you're not a maintainer you can still open a PR with the changes, asking a maintainer to assist you publishing the updated image. The procedure is:
+The procedure is:

 1. Update `build-image/Dockerfile`
-1. Run `go env` and make sure `GOPROXY=https://proxy.golang.org,direct` (Go's default). Some environment may required `GOPROXY=direct`, and if you push a build image with this, build workflow on GitHub will take a lot longer to download modules.
-1. `docker login quay.io`. Note that pushing to `quay.io/cortexproject/build-image` repository can only be done by a maintainer.
-1. Build the and publish the image by using `make push-multiarch-build-image`. This will build and push multi-platform docker image (for linux/amd64 and linux/arm64). Running this step successfully requires [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/), but does not require a specific platform.
-1. Replace the image tag in `.github/workflows/*` (_there may be multiple references_) and Makefile (variable `LATEST_BUILD_IMAGE_TAG`).
+1. Create a PR to master with that change. After the PR is merged to master, the new build image is available in the quay.io repository. Check the GitHub Actions logs [here](https://github.com/cortexproject/cortex/actions/workflows/build-image.yml) to find the image tag.
+1. Create another PR to replace the image tag in `.github/workflows/*` (_there may be multiple references_) and Makefile (variable `LATEST_BUILD_IMAGE_TAG`).
 1. If you are updating Go's runtime version be sure to change `actions/setup-go`'s `go-version` in ``.github/workflows/*`.
 1. Open a PR and make sure the CI with new build-image passes
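For the tag-replacement step, the two kinds of references to update look like this (values taken from this diff; the Makefile line is shown as a comment to keep the sketch in one YAML block):

```yaml
# .github/workflows/test-build-deploy.yml — repeated once per job:
container:
  image: quay.io/cortexproject/build-image:master-6dd64fcce
# Makefile counterpart:
#   LATEST_BUILD_IMAGE_TAG ?= master-6dd64fcce
```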
diff --git a/go.mod b/go.mod
index 1bfb928f14..05f8397e49 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ require (
 	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
 	github.com/alicebob/miniredis/v2 v2.32.1
 	github.com/armon/go-metrics v0.4.1
-	github.com/aws/aws-sdk-go v1.53.6
+	github.com/aws/aws-sdk-go v1.53.20
 	github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874
 	github.com/cespare/xxhash v1.1.0
 	github.com/cortexproject/promqlsmith v0.0.0-20240328172224-5e341f0dd08e
@@ -25,16 +25,16 @@
 	github.com/golang/protobuf v1.5.4
 	github.com/golang/snappy v0.0.4
 	github.com/gorilla/mux v1.8.1
-	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
+	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
 	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
-	github.com/hashicorp/consul/api v1.28.3
+	github.com/hashicorp/consul/api v1.29.1
 	github.com/hashicorp/go-cleanhttp v0.5.2
 	github.com/hashicorp/go-sockaddr v1.0.6
 	github.com/hashicorp/memberlist v0.5.1
 	github.com/json-iterator/go v1.1.12
-	github.com/klauspost/compress v1.17.8
+	github.com/klauspost/compress v1.17.9
 	github.com/lib/pq v1.10.9
-	github.com/minio/minio-go/v7 v7.0.70
+	github.com/minio/minio-go/v7 v7.0.71
 	github.com/mitchellh/go-wordwrap v1.0.1
 	github.com/oklog/ulid v1.3.1
 	github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e
@@ -44,61 +44,60 @@
 	github.com/prometheus/alertmanager v0.27.0
 	github.com/prometheus/client_golang v1.19.1
 	github.com/prometheus/client_model v0.6.1
-	github.com/prometheus/common v0.53.0
+	github.com/prometheus/common v0.54.1-0.20240615204547-04635d2962f9
 	// Prometheus maps version 2.x.y to tags v0.x.y.
-	github.com/prometheus/prometheus v0.52.0-rc.1.0.20240506095108-4b7a44c7a056
+	github.com/prometheus/prometheus v0.52.2-0.20240614130246-4c1e71fa0b3d
 	github.com/segmentio/fasthash v1.0.3
 	github.com/sony/gobreaker v1.0.0
 	github.com/spf13/afero v1.11.0
 	github.com/stretchr/testify v1.9.0
-	github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3
-	github.com/thanos-io/promql-engine v0.0.0-20240405095051-b7d0da367508
-	github.com/thanos-io/thanos v0.35.1-0.20240517203736-9e6cbd9fdd9d
+	github.com/thanos-io/objstore v0.0.0-20240613135658-39f40b8d97f7
+	github.com/thanos-io/promql-engine v0.0.0-20240515161521-93aa311933cf
+	github.com/thanos-io/thanos v0.35.2-0.20240617212227-065e3dd75aac
 	github.com/uber/jaeger-client-go v2.30.0+incompatible
 	github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5
 	go.etcd.io/etcd/api/v3 v3.5.13
 	go.etcd.io/etcd/client/pkg/v3 v3.5.13
 	go.etcd.io/etcd/client/v3 v3.5.13
 	go.opentelemetry.io/contrib/propagators/aws v1.22.0
-	go.opentelemetry.io/otel v1.26.0
+	go.opentelemetry.io/otel v1.27.0
 	go.opentelemetry.io/otel/bridge/opentracing v1.26.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0
-	go.opentelemetry.io/otel/sdk v1.26.0
-	go.opentelemetry.io/otel/trace v1.26.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
+	go.opentelemetry.io/otel/sdk v1.27.0
+	go.opentelemetry.io/otel/trace v1.27.0
 	go.uber.org/atomic v1.11.0
-	golang.org/x/net v0.25.0
+	golang.org/x/net v0.26.0
 	golang.org/x/sync v0.7.0
 	golang.org/x/time v0.5.0
-	google.golang.org/grpc v1.63.2
+	google.golang.org/grpc v1.64.0
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
 )

 require (
-	github.com/VictoriaMetrics/fastcache v1.12.1
+	github.com/VictoriaMetrics/fastcache v1.12.2
 	github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
 	github.com/cespare/xxhash/v2 v2.3.0
 	github.com/google/go-cmp v0.6.0
 	github.com/sercand/kuberesolver/v4 v4.0.0
-	go.opentelemetry.io/collector/pdata v1.7.0
+	go.opentelemetry.io/collector/pdata v1.8.0
 	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
 	google.golang.org/protobuf v1.34.1
 )

 require (
-	cloud.google.com/go v0.112.2 // indirect
-	cloud.google.com/go/auth v0.3.0 // indirect
+	cloud.google.com/go v0.114.0 // indirect
+	cloud.google.com/go/auth v0.5.1 // indirect
 	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
 	cloud.google.com/go/compute/metadata v0.3.0 // indirect
-	cloud.google.com/go/iam v1.1.6 // indirect
-	cloud.google.com/go/storage v1.39.1 // indirect
+	cloud.google.com/go/iam v1.1.8 // indirect
+	cloud.google.com/go/storage v1.40.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
-	github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect
 	github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
 	github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -145,22 +144,20 @@
 	github.com/gogo/googleapis v1.4.0 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/google/btree v1.0.1 // indirect
-	github.com/google/pprof v0.0.0-20240416155748-26353dc0451f // indirect
+	github.com/google/btree v1.1.2 // indirect
+	github.com/google/pprof v0.0.0-20240528025155-186aa0362fba // indirect
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.12.3 // indirect
-	github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
-	github.com/hashicorp/consul/proto-public v0.6.1 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.4 // indirect
+	github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-hclog v1.5.0 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-msgpack v0.5.5 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
-	github.com/hashicorp/go-version v1.6.0 // indirect
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/hashicorp/serf v0.10.1 // indirect
@@ -169,7 +166,7 @@
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/julienschmidt/httprouter v1.3.0 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
 	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
@@ -194,7 +191,7 @@
 	github.com/prometheus-community/prom-label-proxy v0.8.1-0.20240127162815-c1195f9aabc0 // indirect
 	github.com/prometheus/common/sigv4 v0.1.0 // indirect
 	github.com/prometheus/exporter-toolkit v0.11.0 // indirect
-	github.com/prometheus/procfs v0.14.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/redis/rueidis v1.0.14-go1.18 // indirect
 	github.com/rs/cors v1.11.0 // indirect
 	github.com/rs/xid v1.5.0 // indirect
@@ -211,32 +208,31 @@
 	github.com/zhangyunhao116/umap v0.0.0-20221211160557-cb7705fafa39 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/collector/featuregate v1.5.0 // indirect
-	go.opentelemetry.io/collector/semconv v0.98.0 // indirect
+	go.opentelemetry.io/collector/semconv v0.101.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
 	go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 // indirect
 	go.opentelemetry.io/contrib/propagators/b3 v1.13.0 // indirect
 	go.opentelemetry.io/contrib/propagators/jaeger v1.13.0 // indirect
 	go.opentelemetry.io/contrib/propagators/ot v1.13.0 // indirect
-	go.opentelemetry.io/otel/metric v1.26.0 // indirect
+	go.opentelemetry.io/otel/metric v1.27.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.2.0 // indirect
 	go.uber.org/goleak v1.3.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.21.0 // indirect
 	go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect
 	go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect
-	golang.org/x/crypto v0.23.0 // indirect
-	golang.org/x/mod v0.17.0 // indirect
-	golang.org/x/oauth2 v0.19.0 // indirect
-	golang.org/x/sys v0.20.0 // indirect
-	golang.org/x/text v0.15.0 // indirect
-	golang.org/x/tools v0.20.0 // indirect
+	golang.org/x/crypto v0.24.0 // indirect
+	golang.org/x/mod v0.18.0 // indirect
+	golang.org/x/oauth2 v0.21.0 // indirect
+	golang.org/x/sys v0.21.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
+	golang.org/x/tools v0.22.0 // indirect
 	gonum.org/v1/gonum v0.12.0 // indirect
-	google.golang.org/api v0.177.0 // indirect
-	google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 // indirect
+	google.golang.org/api v0.183.0 // indirect
+	google.golang.org/genproto v0.0.0-20240528184218-531527333157 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/telebot.v3 v3.2.1 // indirect
diff --git a/go.sum b/go.sum
index 276ba403b7..a8f8519aee 100644
--- a/go.sum
+++ b/go.sum
@@ -36,8 +36,8 @@ cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRY
 cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
 cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
 cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
-cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw=
-cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms=
+cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY=
+cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E=
 cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
 cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
 cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
@@ -84,8 +84,8 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo
 cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
 cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
 cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
-cloud.google.com/go/auth v0.3.0 h1:PRyzEpGfx/Z9e8+lHsbkoUVXD0gnu4MNmm7Gp8TQNIs=
-cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w=
+cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
+cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
 cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
 cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
 cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
@@ -274,8 +274,8 @@ cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQE
 cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
 cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY=
 cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
-cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
-cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
+cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0=
+cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE=
 cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
 cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
 cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk=
@@ -464,8 +464,8 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq
 cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
 cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
 cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
-cloud.google.com/go/storage v1.39.1 h1:MvraqHKhogCOTXTlct/9C3K3+Uy2jBmFYb3/Sp6dVtY=
-cloud.google.com/go/storage v1.39.1/go.mod h1:xK6xZmxZmo+fyP7+DEF6FhNc24/JAe95OLyOHCXFH1o=
+cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw=
+cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g=
 cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
 cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
 cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
@@ -526,12 +526,12 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum
 git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
@@ -558,8 +558,8 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA=
 github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
-github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
-github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
+github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
+github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
 github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
 github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
 github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
@@ -601,8 +601,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.53.6 h1:1/MYh/VmxdJu7v2bwvDA2JS30UI7bg62QYgQ7KxMa/Q=
-github.com/aws/aws-sdk-go v1.53.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.53.20 h1:cYWPvZLP1gPj5CfUdnfjaaA7WFK3FGoJ/R9+Ks1inU4=
+github.com/aws/aws-sdk-go v1.53.20/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
 github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI=
 github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk=
 github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k=
@@ -650,6 +650,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
+github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
 github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
 github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
 github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous=
@@ -693,8 +695,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
+github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
+github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -715,14 +717,12 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg=
 github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA=
-github.com/digitalocean/godo v1.113.0 h1:CLtCxlP4wDAjKIQ+Hshht/UNbgAp8/J/XBH1ZtDCF9Y=
-github.com/digitalocean/godo v1.113.0/go.mod h1:Z2mTP848Vi3IXXl5YbPekUgr4j4tOePomA+OE1Ag98w=
+github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
+github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v26.0.1+incompatible h1:t39Hm6lpXuXtgkF0dm1t9a5HkbUfdGy6XbWexmGr+hA=
-github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
+github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -780,6 +780,8 @@ github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmV
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY=
+github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
 github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
@@ -841,8 +843,8 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+
 github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
 github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
 github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
-github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA=
-github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0=
+github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
+github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
 github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
@@ -863,7 +865,6 @@ github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFG
 github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=
 github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -918,8 +919,8 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
+github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
 github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
@@ -950,8 +951,9 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
 github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -968,8 +970,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
-github.com/google/pprof v0.0.0-20240416155748-26353dc0451f h1:WpZiq8iqvGjJ3m3wzAVKL6+0vz7VkE79iSy9GII00II=
-github.com/google/pprof v0.0.0-20240416155748-26353dc0451f/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g=
+github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
 github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
@@ -994,12 +996,12 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK
 github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
 github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
 github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
-github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
-github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
+github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
+github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
 github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM=
-github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
+github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
@@ -1007,27 +1009,27 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU=
 github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 h1:guQyUpELu4I0wKgdsRBZDA5blfGiUleuppRSVy9Qbi0=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7/go.mod h1:GhphxcdlaRyAuBSvo6rV71BvQcvB/vuX8ugCyybuS2k=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
 github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
-github.com/hashicorp/consul/api v1.28.3 h1:IE06LST/knnCQ+cxcvzyXRF/DetkgGhJoaOFd4l9xkk=
-github.com/hashicorp/consul/api v1.28.3/go.mod h1:7AGcUFu28HkgOKD/GmsIGIFzRTmN0L02AE9Thsr2OhU=
+github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
+github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
 github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg=
 github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
 github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
-github.com/hashicorp/consul/sdk v0.15.0 h1:2qK9nDrr4tiJKRoxPGhm6B7xJjLVIQqkjiab2M4aKjU=
-github.com/hashicorp/consul/sdk v0.15.0/go.mod h1:r/OmRRPbHOe0yxNahLw7G9x5WG17E1BIECMtCjcPSNo=
+github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
+github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
 github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
 github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -1076,14 +1078,14 @@ github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
-github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7 h1:pjE59CS2C9Bg+Xby0ROrnZSSBWtKwx3Sf9gqsrvIFSA=
-github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
 github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
 github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
-github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
 github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible h1:tKTaPHNVwikS3I1rdyf1INNvgJXWSf/+TzqsiGbrgnQ=
 github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s=
 github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
@@ -1119,18 +1121,17 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
 github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
-github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
+github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -1159,8 +1160,8 @@ github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1
 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
 github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/linode/linodego v1.33.0 h1:cX2FYry7r6CA1ujBMsdqiM4VhvIQtnWsOuVblzfBhCw=
-github.com/linode/linodego v1.33.0/go.mod h1:dSJJgIwqZCF5wnpuC6w5cyIbRtcexAm7uVvuJopGB40=
+github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
+github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
 github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
 github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
 github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
@@ -1203,8 +1204,8 @@ github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcs
 github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.70 h1:1u9NtMgfK1U42kUxcsl5v0yj6TEOPR497OAQxpJnn2g=
-github.com/minio/minio-go/v7 v7.0.70/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo=
+github.com/minio/minio-go/v7 v7.0.71 h1:No9XfOKTYi6i0GnBj+WZwD8WP5GZfL7n7GOjRqCdAjA=
+github.com/minio/minio-go/v7 v7.0.71/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo=
 github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
 github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
@@ -1249,8 +1250,8 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
-github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@@ -1267,8 +1268,8 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYr
 github.com/oracle/oci-go-sdk/v65 v65.41.1 h1:+lbosOyNiib3TGJDvLq1HwEAuFqkOjPJDIkyxM15WdQ=
 github.com/oracle/oci-go-sdk/v65 v65.41.1/go.mod h1:MXMLMzHnnd9wlpgadPkdlkZ9YrwQmCOmbX5kjVEJodw=
 github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
-github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
-github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY=
+github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
+github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
 github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -1324,8 +1325,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+
 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
 github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
-github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
-github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
+github.com/prometheus/common v0.54.1-0.20240615204547-04635d2962f9 h1:WTZ/GBRTImL1HgRTEnJJcM2FuII7PXX1idCIEUJ8/r8=
+github.com/prometheus/common v0.54.1-0.20240615204547-04635d2962f9/go.mod h1:1Yn/UzXoahbVLk1sn6wsGiSiemz3XQejcaz9FIA1r+I=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
 github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0=
@@ -1339,10 +1340,10 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
 github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
-github.com/prometheus/procfs v0.14.0 h1:Lw4VdGGoKEZilJsayHf0B+9YgLGREba2C6xr+Fdfq6s=
-github.com/prometheus/procfs v0.14.0/go.mod h1:XL+Iwz8k8ZabyZfMFHPiilCniixqQarAy5Mu67pHlNQ=
-github.com/prometheus/prometheus v0.52.0-rc.1.0.20240506095108-4b7a44c7a056 h1:fS2ZgEqwQG+i0B1u53yesp2mBbk4v8OOD/PLioiUUm4=
-github.com/prometheus/prometheus v0.52.0-rc.1.0.20240506095108-4b7a44c7a056/go.mod h1:0dSr+q1qLuVNfs5qSfuVN93GOHhTcHrpMvEKvCYY9oE=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/prometheus v0.52.2-0.20240614130246-4c1e71fa0b3d h1:FFA2droSZiHghpHgKHefUCZLJnJQo+ZrKkBkcgZ0xp4=
+github.com/prometheus/prometheus v0.52.2-0.20240614130246-4c1e71fa0b3d/go.mod h1:5AuUoK+n3jJUhhlc7C2DKdhI+dUx93eJvTYpvE0A3jE= github.com/redis/rueidis v1.0.14-go1.18 h1:dGir5z8w8X1ex7JWO/Zx2FMBrZgQ8Yjm+lw9fPLSNGw= github.com/redis/rueidis v1.0.14-go1.18/go.mod h1:HGekzV3HbmzFmRK6j0xic8Z9119+ECoGMjeN1TV1NYU= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1350,8 +1351,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= @@ -1360,8 +1361,8 @@ github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfF github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= @@ -1419,12 +1420,12 @@ github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4 github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3 h1:Q0BjHI7FMe5KkKVXBFYto5VNASxiA/+AEhHup/IT7N0= -github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3/go.mod h1:ptMYNPgbyAR7a2Ab2t7zHA2/0be2ePyawVR7lp7fZtg= -github.com/thanos-io/promql-engine v0.0.0-20240405095051-b7d0da367508 h1:4X0ThYb7/wTTKS73wT13ixw0lj5OJ87g45RWIZhPZDA= -github.com/thanos-io/promql-engine v0.0.0-20240405095051-b7d0da367508/go.mod h1:FEPnabuTql1bDA4OUM41mwcZOJ20R436k8vq+xtGEG0= -github.com/thanos-io/thanos 
v0.35.1-0.20240517203736-9e6cbd9fdd9d h1:Uomb1Yvuz1HDCJL6s0rTiHpaJpHvr8NRarO5TprM7Cs= -github.com/thanos-io/thanos v0.35.1-0.20240517203736-9e6cbd9fdd9d/go.mod h1:mwjTxpNgULRgeOr5qWmM2IKiyu4SNh/1JypUyPtlrQA= +github.com/thanos-io/objstore v0.0.0-20240613135658-39f40b8d97f7 h1:Mu/VxwijVceWmzyHnpYrDBpdsrX4iREVnnB3wX3f1Pg= +github.com/thanos-io/objstore v0.0.0-20240613135658-39f40b8d97f7/go.mod h1:OdoR9ITIuwn4ldouL24CC3zg8J+fPMb8gg9fdQKQDYE= +github.com/thanos-io/promql-engine v0.0.0-20240515161521-93aa311933cf h1:R6of9adrCWXhETBstsFzNqrZou5UqeY3fh3k5yv5POY= +github.com/thanos-io/promql-engine v0.0.0-20240515161521-93aa311933cf/go.mod h1:FEPnabuTql1bDA4OUM41mwcZOJ20R436k8vq+xtGEG0= +github.com/thanos-io/thanos v0.35.2-0.20240617212227-065e3dd75aac h1:Z3Au2M3k6lUZH/9tbkDHlmqGXZb01GrBQ2a8GUZozeE= +github.com/thanos-io/thanos v0.35.2-0.20240617212227-065e3dd75aac/go.mod h1:o9wuuNcTgPmusJuXXNxr39tbC1/solJ4nsZcl3N4VHI= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= @@ -1473,16 +1474,14 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM= -go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= -go.opentelemetry.io/collector/pdata v1.7.0 h1:/WNsBbE6KM3TTPUb9v/5B7IDqnDkgf8GyFhVJJqu7II= -go.opentelemetry.io/collector/pdata v1.7.0/go.mod h1:ehCBBA5GoFrMZkwyZAKGY/lAVSgZf6rzUt3p9mddmPU= -go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY= -go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= +go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= +go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= +go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= +go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 h1:WZwiLCwOL0XW/6TVT7LTtdRDveoHZ6q3wL+0iYsBcdE= go.opentelemetry.io/contrib/propagators/autoprop v0.38.0/go.mod h1:JBebP2d0HiffbfelbIEoBOCl4790g7Z8lD1scUd3Vd8= 
go.opentelemetry.io/contrib/propagators/aws v1.22.0 h1:SKtPYiel5TWrE9gib3F4/BUcrvVjKsA5CH9xnWvj6cQ= @@ -1493,20 +1492,20 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.13.0 h1:+tVlvpiQMOCzi4EYCaBjbl go.opentelemetry.io/contrib/propagators/jaeger v1.13.0/go.mod h1:Qf7eVCLYawiNIB+A81kk8aFDFwYqXSqmt0N2RcvkLLI= go.opentelemetry.io/contrib/propagators/ot v1.13.0 h1:tHWNd0WRS6w9keZoZg9aF3zYohdaBacQfojPYZJgATQ= go.opentelemetry.io/contrib/propagators/ot v1.13.0/go.mod h1:R6Op9T6LxNaMRVlGD0wVwz40LSsAq296CXiEydKLQBU= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= go.opentelemetry.io/otel/bridge/opentracing v1.26.0 h1:Q/dHj0DOhfLMAs5u5ucAbC7gy66x9xxsZRLpHCJ4XhI= go.opentelemetry.io/otel/bridge/opentracing v1.26.0/go.mod h1:HfypvOw/8rqu4lXDhwaxVK1ibBAi1lTMXBHV9rywOCw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 h1:Waw9Wfpo/IXzOI8bCB7DIk+0JZcqqsyn1JFnAc+iam8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0/go.mod h1:wnJIG4fOqyynOnnQF/eQb4/16VlX2EJAHhHgqIqWfAo= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= -go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= -go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= -go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1544,8 +1543,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1605,8 +1604,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1672,8 +1671,8 @@ golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1703,8 +1702,8 @@ golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= -golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1825,8 +1824,9 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -1834,8 +1834,8 @@ golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1851,8 +1851,8 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1860,7 +1860,6 @@ golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1928,8 +1927,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2007,8 +2006,8 @@ google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.177.0 h1:8a0p/BbPa65GlqGWtUKxot4p0TV8OGOfyTjtmkXNXmk= -google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw= +google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= +google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2146,12 +2145,12 @@ google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU= -google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 h1:DujSIu+2tC9Ht0aPNA7jgj23Iq8Ewi5sgkQ++wdvonE= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= +google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2194,8 +2193,8 @@ google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsA google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/integration/e2e/util.go b/integration/e2e/util.go index 2175928cea..ef877020c0 100644 --- a/integration/e2e/util.go +++ b/integration/e2e/util.go @@ -268,14 +268,14 @@ func CreateBlock( return id, errors.Wrap(err, "create compactor") } - id, err = c.Write(dir, h, mint, maxt, nil) + ids, err := c.Write(dir, h, mint, maxt, nil) if err != nil { return id, errors.Wrap(err, "write block") } - - if id.Compare(ulid.ULID{}) == 0 { + if len(ids) == 0 { return id, errors.Errorf("nothing to write, asked for %d samples", numSamples) } + id = ids[0] blockDir := filepath.Join(dir, id.String()) logger := log.NewNopLogger() diff --git a/pkg/alertmanager/alertmanager_client.go b/pkg/alertmanager/alertmanager_client.go index 7cd5865588..041449269a 100644 --- a/pkg/alertmanager/alertmanager_client.go +++ b/pkg/alertmanager/alertmanager_client.go @@ -107,7 +107,7 @@ func dialAlertmanagerClient(cfg grpcclient.Config, addr string, requestDuration if err != nil { return nil, err } - conn, err := grpc.Dial(addr, opts...) + conn, err := grpc.NewClient(addr, opts...) 
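+	// Unlike grpc.Dial, grpc.NewClient performs no I/O here: the ClientConn
+	// starts idle and connects lazily on the first RPC, so this error only
+	// covers invalid targets or dial options.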
if err != nil { return nil, errors.Wrapf(err, "failed to dial alertmanager %s", addr) } diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 726a167bee..a886a86249 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -1729,19 +1729,14 @@ func (m *tsdbCompactorMock) Plan(dir string) ([]string, error) { return args.Get(0).([]string), args.Error(1) } -func (m *tsdbCompactorMock) Write(dest string, b tsdb.BlockReader, mint, maxt int64, parent *tsdb.BlockMeta) (ulid.ULID, error) { - args := m.Called(dest, b, mint, maxt, parent) - return args.Get(0).(ulid.ULID), args.Error(1) -} - -func (m *tsdbCompactorMock) Compact(dest string, dirs []string, open []*tsdb.Block) (ulid.ULID, error) { +func (m *tsdbCompactorMock) Compact(dest string, dirs []string, open []*tsdb.Block) ([]ulid.ULID, error) { args := m.Called(dest, dirs, open) - return args.Get(0).(ulid.ULID), args.Error(1) + return args.Get(0).([]ulid.ULID), args.Error(1) } -func (m *tsdbCompactorMock) CompactWithBlockPopulator(dest string, dirs []string, open []*tsdb.Block, blockPopulator tsdb.BlockPopulator) (uid ulid.ULID, err error) { +func (m *tsdbCompactorMock) CompactWithBlockPopulator(dest string, dirs []string, open []*tsdb.Block, blockPopulator tsdb.BlockPopulator) ([]ulid.ULID, error) { args := m.Called(dest, dirs, open, blockPopulator) - return args.Get(0).(ulid.ULID), args.Error(1) + return args.Get(0).([]ulid.ULID), args.Error(1) } type tsdbPlannerMock struct { diff --git a/pkg/cortex/cortex_test.go b/pkg/cortex/cortex_test.go index a1cd2afc3b..891f860898 100644 --- a/pkg/cortex/cortex_test.go +++ b/pkg/cortex/cortex_test.go @@ -190,7 +190,7 @@ func TestGrpcAuthMiddleware(t *testing.T) { }() } - conn, err := grpc.Dial(net.JoinHostPort(cfg.Server.GRPCListenAddress, strconv.Itoa(cfg.Server.GRPCListenPort)), grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(net.JoinHostPort(cfg.Server.GRPCListenAddress, strconv.Itoa(cfg.Server.GRPCListenPort)), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) defer func() { require.NoError(t, conn.Close()) diff --git a/pkg/cortexpb/compat.go b/pkg/cortexpb/compat.go index c7deb4f7ab..07bd89f716 100644 --- a/pkg/cortexpb/compat.go +++ b/pkg/cortexpb/compat.go @@ -18,7 +18,7 @@ import ( "github.com/cortexproject/cortex/pkg/util" ) -// ToWriteRequest converts matched slices of Labels, Samples and Metadata into a WriteRequest proto. +// ToWriteRequest converts matched slices of Labels, Samples, Metadata and Histograms into a WriteRequest proto. // It gets timeseries from the pool, so ReuseSlice() should be called when done. func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMetadata, histograms []Histogram, source WriteRequest_SourceEnum) *WriteRequest { req := &WriteRequest{ @@ -27,13 +27,17 @@ func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMe Source: source, } - for i, s := range samples { + i := 0 + for i < len(samples) || i < len(histograms) { ts := TimeseriesFromPool() ts.Labels = append(ts.Labels, FromLabelsToLabelAdapters(lbls[i])...) 
-		ts.Samples = append(ts.Samples, s)
+		if i < len(samples) {
+			ts.Samples = append(ts.Samples, samples[i])
+		}
 		if i < len(histograms) {
 			ts.Histograms = append(ts.Histograms, histograms[i])
 		}
+		i++
 		req.Timeseries = append(req.Timeseries, PreallocTimeseries{TimeSeries: ts})
 	}
diff --git a/pkg/cortexpb/histograms.go b/pkg/cortexpb/histograms.go
new file mode 100644
index 0000000000..2e2afef457
--- /dev/null
+++ b/pkg/cortexpb/histograms.go
@@ -0,0 +1,120 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cortexpb
+
+import "github.com/prometheus/prometheus/model/histogram"
+
+func (h Histogram) IsFloatHistogram() bool {
+	_, ok := h.GetCount().(*Histogram_CountFloat)
+	return ok
+}
+
+// HistogramProtoToHistogram extracts a (normal integer) Histogram from the
+// provided proto message. The caller has to make sure that the proto message
+// represents an integer histogram and not a float histogram, or it panics.
+// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L626-L645
+func HistogramProtoToHistogram(hp Histogram) *histogram.Histogram {
+	if hp.IsFloatHistogram() {
+		panic("HistogramProtoToHistogram called with a float histogram")
+	}
+	return &histogram.Histogram{
+		CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
+		Schema:           hp.Schema,
+		ZeroThreshold:    hp.ZeroThreshold,
+		ZeroCount:        hp.GetZeroCountInt(),
+		Count:            hp.GetCountInt(),
+		Sum:              hp.Sum,
+		PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
+		PositiveBuckets:  hp.GetPositiveDeltas(),
+		NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
+		NegativeBuckets:  hp.GetNegativeDeltas(),
+	}
+}
+
+// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the provided proto message.
+// The caller has to make sure that the proto message represents a float histogram and not an
+// integer histogram, or it panics.
+// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L647-L667
+func FloatHistogramProtoToFloatHistogram(hp Histogram) *histogram.FloatHistogram {
+	if !hp.IsFloatHistogram() {
+		panic("FloatHistogramProtoToFloatHistogram called with an integer histogram")
+	}
+	return &histogram.FloatHistogram{
+		CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
+		Schema:           hp.Schema,
+		ZeroThreshold:    hp.ZeroThreshold,
+		ZeroCount:        hp.GetZeroCountFloat(),
+		Count:            hp.GetCountFloat(),
+		Sum:              hp.Sum,
+		PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
+		PositiveBuckets:  hp.GetPositiveCounts(),
+		NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
+		NegativeBuckets:  hp.GetNegativeCounts(),
+	}
+}
+
+// HistogramToHistogramProto converts a (normal integer) Histogram to its protobuf message type.
+// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L709-L723 +func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) Histogram { + return Histogram{ + Count: &Histogram_CountInt{CountInt: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, + NegativeSpans: spansToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: spansToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + ResetHint: Histogram_ResetHint(h.CounterResetHint), + TimestampMs: timestamp, + } +} + +// FloatHistogramToHistogramProto converts a float Histogram to a normal +// Histogram's protobuf message type. +// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L725-L739 +func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) Histogram { + return Histogram{ + Count: &Histogram_CountFloat{CountFloat: fh.Count}, + Sum: fh.Sum, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, + NegativeSpans: spansToSpansProto(fh.NegativeSpans), + NegativeCounts: fh.NegativeBuckets, + PositiveSpans: spansToSpansProto(fh.PositiveSpans), + PositiveCounts: fh.PositiveBuckets, + ResetHint: Histogram_ResetHint(fh.CounterResetHint), + TimestampMs: timestamp, + } +} + +func spansProtoToSpans(s []BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func spansToSpansProto(s []histogram.Span) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 3e73153ba0..6c1600c50b 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -69,6 +69,9 @@ const ( // mergeSlicesParallelism is a constant of how much go routines we should use to merge slices, and // it was based on empirical observation: See BenchmarkMergeSlicesParallel mergeSlicesParallelism = 8 + + sampleMetricTypeFloat = "float" + sampleMetricTypeHistogram = "histogram" ) // Distributor is a storage.SampleAppender and a client.Querier which @@ -119,6 +122,8 @@ type Distributor struct { ingesterQueryFailures *prometheus.CounterVec replicationFactor prometheus.Gauge latestSeenSampleTimestampPerUser *prometheus.GaugeVec + + validateMetrics *validation.ValidateMetrics } // Config contains the configuration required to @@ -274,7 +279,7 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove Namespace: "cortex", Name: "distributor_received_samples_total", Help: "The total number of received samples, excluding rejected and deduped samples.", - }, []string{"user"}), + }, []string{"user", "type"}), receivedExemplars: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Namespace: "cortex", Name: "distributor_received_exemplars_total", @@ -289,7 +294,7 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove Namespace: "cortex", Name: "distributor_samples_in_total", Help: "The total number of samples that have come in to the distributor, including rejected or deduped samples.", - }, 
[]string{"user"}), + }, []string{"user", "type"}), incomingExemplars: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Namespace: "cortex", Name: "distributor_exemplars_in_total", @@ -345,6 +350,8 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove Name: "cortex_distributor_latest_seen_sample_timestamp_seconds", Help: "Unix timestamp of latest received sample per user.", }, []string{"user"}), + + validateMetrics: validation.NewValidateMetrics(reg), } promauto.With(reg).NewGauge(prometheus.GaugeOpts{ @@ -424,10 +431,12 @@ func (d *Distributor) cleanupInactiveUser(userID string) { d.HATracker.CleanupHATrackerMetricsForUser(userID) - d.receivedSamples.DeleteLabelValues(userID) + d.receivedSamples.DeleteLabelValues(userID, sampleMetricTypeFloat) + d.receivedSamples.DeleteLabelValues(userID, sampleMetricTypeHistogram) d.receivedExemplars.DeleteLabelValues(userID) d.receivedMetadata.DeleteLabelValues(userID) - d.incomingSamples.DeleteLabelValues(userID) + d.incomingSamples.DeleteLabelValues(userID, sampleMetricTypeFloat) + d.incomingSamples.DeleteLabelValues(userID, sampleMetricTypeHistogram) d.incomingExemplars.DeleteLabelValues(userID) d.incomingMetadata.DeleteLabelValues(userID) d.nonHASamples.DeleteLabelValues(userID) @@ -437,7 +446,7 @@ func (d *Distributor) cleanupInactiveUser(userID string) { level.Warn(d.log).Log("msg", "failed to remove cortex_distributor_deduped_samples_total metric for user", "user", userID, "err", err) } - validation.DeletePerUserValidationMetrics(userID, d.log) + validation.DeletePerUserValidationMetrics(d.validateMetrics, userID, d.log) } // Called after distributor is asked to stop via StopAsync. @@ -534,7 +543,7 @@ func (d *Distributor) checkSample(ctx context.Context, userID, cluster, replica func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID string, skipLabelNameValidation bool, limits *validation.Limits) (cortexpb.PreallocTimeseries, validation.ValidationError) { d.labelsHistogram.Observe(float64(len(ts.Labels))) - if err := validation.ValidateLabels(limits, userID, ts.Labels, skipLabelNameValidation); err != nil { + if err := validation.ValidateLabels(d.validateMetrics, limits, userID, ts.Labels, skipLabelNameValidation); err != nil { return emptyPreallocSeries, err } @@ -543,7 +552,7 @@ func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID stri // Only alloc when data present samples = make([]cortexpb.Sample, 0, len(ts.Samples)) for _, s := range ts.Samples { - if err := validation.ValidateSample(limits, userID, ts.Labels, s); err != nil { + if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, ts.Labels, s.TimestampMs); err != nil { return emptyPreallocSeries, err } samples = append(samples, s) @@ -555,7 +564,7 @@ func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID stri // Only alloc when data present exemplars = make([]cortexpb.Exemplar, 0, len(ts.Exemplars)) for _, e := range ts.Exemplars { - if err := validation.ValidateExemplar(userID, ts.Labels, e); err != nil { + if err := validation.ValidateExemplar(d.validateMetrics, userID, ts.Labels, e); err != nil { // An exemplar validation error prevents ingesting samples // in the same series object. 
However, because the current Prometheus
 				// remote write implementation only populates one or the other,
@@ -570,8 +579,13 @@ func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID stri
 	if len(ts.Histograms) > 0 {
 		// Only alloc when data present
 		histograms = make([]cortexpb.Histogram, 0, len(ts.Histograms))
-		// TODO(yeya24): we need to have validations for native histograms
-		// at some point. Skip validations for now.
+		for _, h := range ts.Histograms {
+			// TODO(yeya24): add other validations for native histograms.
+			// For example, Prometheus scrape has bucket limit and schema check.
+			if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, ts.Labels, h.TimestampMs); err != nil {
+				return emptyPreallocSeries, err
+			}
+		}
 		histograms = append(histograms, ts.Histograms...)
 	}
@@ -603,14 +617,17 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co
 	now := time.Now()
 	d.activeUsers.UpdateUserTimestamp(userID, now)
-	numSamples := 0
+	numFloatSamples := 0
+	numHistogramSamples := 0
 	numExemplars := 0
 	for _, ts := range req.Timeseries {
-		numSamples += len(ts.Samples) + len(ts.Histograms)
+		numFloatSamples += len(ts.Samples)
+		numHistogramSamples += len(ts.Histograms)
 		numExemplars += len(ts.Exemplars)
 	}
 	// Count the total samples, exemplars in, prior to validation or deduplication, for comparison with other metrics.
-	d.incomingSamples.WithLabelValues(userID).Add(float64(numSamples))
+	d.incomingSamples.WithLabelValues(userID, sampleMetricTypeFloat).Add(float64(numFloatSamples))
+	d.incomingSamples.WithLabelValues(userID, sampleMetricTypeHistogram).Add(float64(numHistogramSamples))
 	d.incomingExemplars.WithLabelValues(userID).Add(float64(numExemplars))
 	// Count the total number of metadata in.
 	d.incomingMetadata.WithLabelValues(userID).Add(float64(len(req.Metadata)))
@@ -638,12 +655,12 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co
 		if errors.Is(err, ha.ReplicasNotMatchError{}) {
 			// These samples have been deduped.
-			d.dedupedSamples.WithLabelValues(userID, cluster).Add(float64(numSamples))
+			d.dedupedSamples.WithLabelValues(userID, cluster).Add(float64(numFloatSamples + numHistogramSamples))
 			return nil, httpgrpc.Errorf(http.StatusAccepted, err.Error())
 		}
 		if errors.Is(err, ha.TooManyReplicaGroupsError{}) {
-			validation.DiscardedSamples.WithLabelValues(validation.TooManyHAClusters, userID).Add(float64(numSamples))
+			d.validateMetrics.DiscardedSamples.WithLabelValues(validation.TooManyHAClusters, userID).Add(float64(numFloatSamples + numHistogramSamples))
 			return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
 		}
@@ -651,18 +668,19 @@
 		}
 		// If there wasn't an error but removeReplica is false that means we didn't find both HA labels.
 		if !removeReplica {
-			d.nonHASamples.WithLabelValues(userID).Add(float64(numSamples))
+			d.nonHASamples.WithLabelValues(userID).Add(float64(numFloatSamples + numHistogramSamples))
 		}
 	}
 	// A WriteRequest can only contain series or metadata but not both. This might change in the future.
- seriesKeys, validatedTimeseries, validatedSamples, validatedExemplars, firstPartialErr, err := d.prepareSeriesKeys(ctx, req, userID, limits, removeReplica) + seriesKeys, validatedTimeseries, validatedFloatSamples, validatedHistogramSamples, validatedExemplars, firstPartialErr, err := d.prepareSeriesKeys(ctx, req, userID, limits, removeReplica) if err != nil { return nil, err } metadataKeys, validatedMetadata, firstPartialErr := d.prepareMetadataKeys(req, limits, userID, firstPartialErr) - d.receivedSamples.WithLabelValues(userID).Add(float64(validatedSamples)) + d.receivedSamples.WithLabelValues(userID, sampleMetricTypeFloat).Add(float64(validatedFloatSamples)) + d.receivedSamples.WithLabelValues(userID, sampleMetricTypeHistogram).Add(float64(validatedHistogramSamples)) d.receivedExemplars.WithLabelValues(userID).Add(float64(validatedExemplars)) d.receivedMetadata.WithLabelValues(userID).Add(float64(len(validatedMetadata))) @@ -673,18 +691,19 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co return &cortexpb.WriteResponse{}, firstPartialErr } - totalN := validatedSamples + validatedExemplars + len(validatedMetadata) + totalSamples := validatedFloatSamples + validatedHistogramSamples + totalN := totalSamples + validatedExemplars + len(validatedMetadata) if !d.ingestionRateLimiter.AllowN(now, userID, totalN) { // Ensure the request slice is reused if the request is rate limited. cortexpb.ReuseSlice(req.Timeseries) - validation.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedSamples)) - validation.DiscardedExemplars.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedExemplars)) - validation.DiscardedMetadata.WithLabelValues(validation.RateLimited, userID).Add(float64(len(validatedMetadata))) + d.validateMetrics.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(totalSamples)) + d.validateMetrics.DiscardedExemplars.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedExemplars)) + d.validateMetrics.DiscardedMetadata.WithLabelValues(validation.RateLimited, userID).Add(float64(len(validatedMetadata))) // Return a 429 here to tell the client it is going too fast. // Client may discard the data or slow down and re-send. // Prometheus v2.26 added a remote-write option 'retry_on_http_429'. - return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%v) exceeded while adding %d samples and %d metadata", d.ingestionRateLimiter.Limit(now, userID), validatedSamples, len(validatedMetadata)) + return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%v) exceeded while adding %d samples and %d metadata", d.ingestionRateLimiter.Limit(now, userID), totalSamples, len(validatedMetadata)) } // totalN included samples and metadata. Ingester follows this pattern when computing its ingestion rate. 
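An illustrative aside on the hunks above (a minimal sketch, not Cortex code; the metric name and tenant IDs are placeholders): splitting a counter by a new "type" label means every per-user operation, including inactive-user cleanup, has to enumerate each label combination, which is why cleanupInactiveUser gained a second DeleteLabelValues call.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	reg := prometheus.NewRegistry()

	// A CounterVec keyed by both user and sample type, mirroring the
	// {user, type} labelling the patch introduces.
	received := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
		Name: "distributor_received_samples_total",
		Help: "Received samples, split by sample type.",
	}, []string{"user", "type"})

	// Float and histogram samples are now counted separately per tenant.
	received.WithLabelValues("tenant-1", "float").Add(5)
	received.WithLabelValues("tenant-1", "histogram").Add(2)

	// Cleaning up an inactive tenant must delete every type variant.
	received.DeleteLabelValues("tenant-1", "float")
	received.DeleteLabelValues("tenant-1", "histogram")

	fmt.Println(testutil.CollectAndCount(received)) // prints 0 after cleanup
}
```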
@@ -790,7 +809,7 @@ func (d *Distributor) prepareMetadataKeys(req *cortexpb.WriteRequest, limits *va metadataKeys := make([]uint32, 0, len(req.Metadata)) for _, m := range req.Metadata { - err := validation.ValidateMetadata(limits, userID, m) + err := validation.ValidateMetadata(d.validateMetrics, limits, userID, m) if err != nil { if firstPartialErr == nil { @@ -806,7 +825,7 @@ func (d *Distributor) prepareMetadataKeys(req *cortexpb.WriteRequest, limits *va return metadataKeys, validatedMetadata, firstPartialErr } -func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.WriteRequest, userID string, limits *validation.Limits, removeReplica bool) ([]uint32, []cortexpb.PreallocTimeseries, int, int, error, error) { +func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.WriteRequest, userID string, limits *validation.Limits, removeReplica bool) ([]uint32, []cortexpb.PreallocTimeseries, int, int, int, error, error) { pSpan, _ := opentracing.StartSpanFromContext(ctx, "prepareSeriesKeys") defer pSpan.Finish() @@ -814,7 +833,8 @@ func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.Write // check each sample/metadata and discard if outside limits. validatedTimeseries := make([]cortexpb.PreallocTimeseries, 0, len(req.Timeseries)) seriesKeys := make([]uint32, 0, len(req.Timeseries)) - validatedSamples := 0 + validatedFloatSamples := 0 + validatedHistogramSamples := 0 validatedExemplars := 0 var firstPartialErr error @@ -835,13 +855,15 @@ func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.Write if len(ts.Samples) > 0 { latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].TimestampMs) } - // TODO(yeya24): use timestamp of the latest native histogram in the series as well. + if len(ts.Histograms) > 0 { + latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Histograms[len(ts.Histograms)-1].TimestampMs) + } if mrc := limits.MetricRelabelConfigs; len(mrc) > 0 { l, _ := relabel.Process(cortexpb.FromLabelAdaptersToLabels(ts.Labels), mrc...) if len(l) == 0 { // all labels are gone, samples will be discarded - validation.DiscardedSamples.WithLabelValues( + d.validateMetrics.DiscardedSamples.WithLabelValues( validation.DroppedByRelabelConfiguration, userID, ).Add(float64(len(ts.Samples))) @@ -862,7 +884,7 @@ func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.Write } if len(ts.Labels) == 0 { - validation.DiscardedExemplars.WithLabelValues( + d.validateMetrics.DiscardedExemplars.WithLabelValues( validation.DroppedByUserConfigurationOverride, userID, ).Add(float64(len(ts.Samples))) @@ -881,7 +903,7 @@ func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.Write // label and dropped labels (if any) key, err := d.tokenForLabels(userID, ts.Labels) if err != nil { - return nil, nil, 0, 0, nil, err + return nil, nil, 0, 0, 0, nil, err } validatedSeries, validationErr := d.validateSeries(ts, userID, skipLabelNameValidation, limits) @@ -900,11 +922,11 @@ func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.Write seriesKeys = append(seriesKeys, key) validatedTimeseries = append(validatedTimeseries, validatedSeries) - // TODO(yeya24): add histogram samples as well when supported. 
- validatedSamples += len(ts.Samples) + validatedFloatSamples += len(ts.Samples) + validatedHistogramSamples += len(ts.Histograms) validatedExemplars += len(ts.Exemplars) } - return seriesKeys, validatedTimeseries, validatedSamples, validatedExemplars, firstPartialErr, nil + return seriesKeys, validatedTimeseries, validatedFloatSamples, validatedHistogramSamples, validatedExemplars, firstPartialErr, nil } func sortLabelsIfNeeded(labels []cortexpb.LabelAdapter) { diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 802c4996a5..cdd9ed0afc 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -45,6 +45,7 @@ import ( "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/chunkcompat" "github.com/cortexproject/cortex/pkg/util/flagext" + histogram_util "github.com/cortexproject/cortex/pkg/util/histogram" "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" @@ -341,13 +342,15 @@ func TestDistributor_MetricsCleanup(t *testing.T) { "cortex_distributor_ingester_queries_total", } - d.receivedSamples.WithLabelValues("userA").Add(5) - d.receivedSamples.WithLabelValues("userB").Add(10) + d.receivedSamples.WithLabelValues("userA", sampleMetricTypeFloat).Add(5) + d.receivedSamples.WithLabelValues("userB", sampleMetricTypeFloat).Add(10) + d.receivedSamples.WithLabelValues("userC", sampleMetricTypeHistogram).Add(15) d.receivedExemplars.WithLabelValues("userA").Add(5) d.receivedExemplars.WithLabelValues("userB").Add(10) d.receivedMetadata.WithLabelValues("userA").Add(5) d.receivedMetadata.WithLabelValues("userB").Add(10) - d.incomingSamples.WithLabelValues("userA").Add(5) + d.incomingSamples.WithLabelValues("userA", sampleMetricTypeFloat).Add(5) + d.incomingSamples.WithLabelValues("userB", sampleMetricTypeHistogram).Add(6) d.incomingExemplars.WithLabelValues("userA").Add(5) d.incomingMetadata.WithLabelValues("userA").Add(5) d.nonHASamples.WithLabelValues("userA").Add(5) @@ -388,8 +391,9 @@ func TestDistributor_MetricsCleanup(t *testing.T) { # HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples. # TYPE cortex_distributor_received_samples_total counter - cortex_distributor_received_samples_total{user="userA"} 5 - cortex_distributor_received_samples_total{user="userB"} 10 + cortex_distributor_received_samples_total{type="float",user="userA"} 5 + cortex_distributor_received_samples_total{type="float",user="userB"} 10 + cortex_distributor_received_samples_total{type="histogram",user="userC"} 15 # HELP cortex_distributor_received_exemplars_total The total number of received exemplars, excluding rejected and deduped exemplars. # TYPE cortex_distributor_received_exemplars_total counter @@ -398,7 +402,8 @@ func TestDistributor_MetricsCleanup(t *testing.T) { # HELP cortex_distributor_samples_in_total The total number of samples that have come in to the distributor, including rejected or deduped samples. # TYPE cortex_distributor_samples_in_total counter - cortex_distributor_samples_in_total{user="userA"} 5 + cortex_distributor_samples_in_total{type="float",user="userA"} 5 + cortex_distributor_samples_in_total{type="histogram",user="userB"} 6 # HELP cortex_distributor_exemplars_in_total The total number of exemplars that have come in to the distributor, including rejected or deduped exemplars. 
# TYPE cortex_distributor_exemplars_in_total counter @@ -457,15 +462,17 @@ func TestDistributor_MetricsCleanup(t *testing.T) { # HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples. # TYPE cortex_distributor_received_samples_total counter - cortex_distributor_received_samples_total{user="userB"} 10 + cortex_distributor_received_samples_total{type="float",user="userB"} 10 + cortex_distributor_received_samples_total{type="histogram",user="userC"} 15 + + # HELP cortex_distributor_samples_in_total The total number of samples that have come in to the distributor, including rejected or deduped samples. + # TYPE cortex_distributor_samples_in_total counter + cortex_distributor_samples_in_total{type="histogram",user="userB"} 6 # HELP cortex_distributor_received_exemplars_total The total number of received exemplars, excluding rejected and deduped exemplars. # TYPE cortex_distributor_received_exemplars_total counter cortex_distributor_received_exemplars_total{user="userB"} 10 - # HELP cortex_distributor_samples_in_total The total number of samples that have come in to the distributor, including rejected or deduped samples. - # TYPE cortex_distributor_samples_in_total counter - # HELP cortex_distributor_exemplars_in_total The total number of exemplars that have come in to the distributor, including rejected or deduped exemplars. # TYPE cortex_distributor_exemplars_in_total counter @@ -2506,6 +2513,22 @@ func mockWriteRequest(lbls []labels.Labels, value float64, timestampMs int64) *c return cortexpb.ToWriteRequest(lbls, samples, nil, nil, cortexpb.API) } +// nolint:unused +func mockHistogramWriteRequest(lbls []labels.Labels, value int, timestampMs int64, float bool) *cortexpb.WriteRequest { + histograms := make([]cortexpb.Histogram, len(lbls)) + for i := range lbls { + if float { + fh := histogram_util.GenerateTestFloatHistogram(value) + histograms[i] = cortexpb.FloatHistogramToHistogramProto(timestampMs, fh) + continue + } + h := histogram_util.GenerateTestHistogram(value) + histograms[i] = cortexpb.HistogramToHistogramProto(timestampMs, h) + } + + return cortexpb.ToWriteRequest(lbls, nil, nil, histograms, cortexpb.API) +} + type prepConfig struct { numIngesters, happyIngesters int queryDelay time.Duration @@ -3589,9 +3612,6 @@ func TestDistributor_Push_RelabelDropWillExportMetricOfDroppedSamples(t *testing limits: &limits, }) - regs[0].MustRegister(validation.DiscardedSamples) - validation.DiscardedSamples.Reset() - // Push the series to the distributor req := mockWriteRequest(inputSeries, 1, 1) ctx := user.InjectOrgID(context.Background(), "userDistributorPushRelabelDropWillExportMetricOfDroppedSamples") @@ -3613,9 +3633,9 @@ func TestDistributor_Push_RelabelDropWillExportMetricOfDroppedSamples(t *testing cortex_discarded_samples_total{reason="relabel_configuration",user="userDistributorPushRelabelDropWillExportMetricOfDroppedSamples"} 1 # HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples. 
# TYPE cortex_distributor_received_samples_total counter - cortex_distributor_received_samples_total{user="userDistributorPushRelabelDropWillExportMetricOfDroppedSamples"} 1 + cortex_distributor_received_samples_total{type="float",user="userDistributorPushRelabelDropWillExportMetricOfDroppedSamples"} 1 + cortex_distributor_received_samples_total{type="histogram",user="userDistributorPushRelabelDropWillExportMetricOfDroppedSamples"} 0 ` - require.NoError(t, testutil.GatherAndCompare(regs[0], strings.NewReader(expectedMetrics), metrics...)) } diff --git a/pkg/frontend/v2/frontend_scheduler_worker.go b/pkg/frontend/v2/frontend_scheduler_worker.go index bbe1e2ed74..61a4d33404 100644 --- a/pkg/frontend/v2/frontend_scheduler_worker.go +++ b/pkg/frontend/v2/frontend_scheduler_worker.go @@ -85,7 +85,7 @@ func (f *frontendSchedulerWorkers) AddressAdded(address string) { } level.Info(f.log).Log("msg", "adding connection to scheduler", "addr", address) - conn, err := f.connectToScheduler(context.Background(), address) + conn, err := f.connectToScheduler(address) if err != nil { level.Error(f.log).Log("msg", "error connecting to scheduler", "addr", address, "err", err) return @@ -126,14 +126,14 @@ func (f *frontendSchedulerWorkers) getWorkersCount() int { return len(f.workers) } -func (f *frontendSchedulerWorkers) connectToScheduler(ctx context.Context, address string) (*grpc.ClientConn, error) { +func (f *frontendSchedulerWorkers) connectToScheduler(address string) (*grpc.ClientConn, error) { // Because we only use single long-running method, it doesn't make sense to inject user ID, send over tracing or add metrics. opts, err := f.cfg.GRPCClientConfig.DialOption(nil, nil) if err != nil { return nil, err } - conn, err := grpc.DialContext(ctx, address, opts...) + conn, err := grpc.NewClient(address, opts...) if err != nil { return nil, err } diff --git a/pkg/ingester/client/client.go b/pkg/ingester/client/client.go index 3a20cdace7..d10834f3c8 100644 --- a/pkg/ingester/client/client.go +++ b/pkg/ingester/client/client.go @@ -89,7 +89,7 @@ func MakeIngesterClient(addr string, cfg Config) (HealthAndIngesterClient, error if err != nil { return nil, err } - conn, err := grpc.Dial(addr, dialOpts...) + conn, err := grpc.NewClient(addr, dialOpts...) 
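// The dialer changes in this file and in the ingester/test files below swap
// grpc.DialContext/grpc.Dial for grpc.NewClient (available in grpc-go v1.63+).
// A minimal sketch of the new call, with an illustrative address and insecure
// credentials standing in for the real Cortex dial options:
//
//	import (
//		"google.golang.org/grpc"
//		"google.golang.org/grpc/credentials/insecure"
//	)
//
//	conn, err := grpc.NewClient("ingester-0:9095",
//		grpc.WithTransportCredentials(insecure.NewCredentials()))
//
// grpc.NewClient constructs the ClientConn lazily: no network I/O happens
// until the first RPC, so it takes no context.Context at construction time.
// It also defaults to the "dns" resolver rather than "passthrough", which is
// why the bufconn-based test below now dials "passthrough://bufnet" explicitly.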
if err != nil { return nil, err } diff --git a/pkg/ingester/client/compat.go b/pkg/ingester/client/compat.go index 915f575e4a..fb3969a94b 100644 --- a/pkg/ingester/client/compat.go +++ b/pkg/ingester/client/compat.go @@ -154,21 +154,39 @@ func SeriesSetToQueryResponse(s storage.SeriesSet) (*QueryResponse, error) { for s.Next() { series := s.At() samples := []cortexpb.Sample{} + histograms := []cortexpb.Histogram{} it = series.Iterator(it) - for it.Next() != chunkenc.ValNone { - t, v := it.At() - samples = append(samples, cortexpb.Sample{ - TimestampMs: t, - Value: v, - }) + for valType := it.Next(); valType != chunkenc.ValNone; valType = it.Next() { + switch valType { + case chunkenc.ValFloat: + t, v := it.At() + samples = append(samples, cortexpb.Sample{ + TimestampMs: t, + Value: v, + }) + case chunkenc.ValHistogram: + t, v := it.AtHistogram(nil) + histograms = append(histograms, cortexpb.HistogramToHistogramProto(t, v)) + case chunkenc.ValFloatHistogram: + t, v := it.AtFloatHistogram(nil) + histograms = append(histograms, cortexpb.FloatHistogramToHistogramProto(t, v)) + default: + panic("unhandled default case") + } } if err := it.Err(); err != nil { return nil, err } - result.Timeseries = append(result.Timeseries, cortexpb.TimeSeries{ - Labels: cortexpb.FromLabelsToLabelAdapters(series.Labels()), - Samples: samples, - }) + ts := cortexpb.TimeSeries{ + Labels: cortexpb.FromLabelsToLabelAdapters(series.Labels()), + } + if len(samples) > 0 { + ts.Samples = samples + } + if len(histograms) > 0 { + ts.Histograms = histograms + } + result.Timeseries = append(result.Timeseries, ts) } return result, s.Err() diff --git a/pkg/ingester/client/cortex_util_test.go b/pkg/ingester/client/cortex_util_test.go index 30dd8bc9a1..3058026ebe 100644 --- a/pkg/ingester/client/cortex_util_test.go +++ b/pkg/ingester/client/cortex_util_test.go @@ -95,7 +95,7 @@ func TestStreamingSends(t *testing.T) { return listen.Dial() } - conn, err := grpc.DialContext(context.Background(), "bufnet", grpc.WithContextDialer(bufDialer), grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient("passthrough://bufnet", grpc.WithContextDialer(bufDialer), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) defer conn.Close() diff --git a/pkg/ingester/client/custom.go b/pkg/ingester/client/custom.go index 60e240c210..62c0f43930 100644 --- a/pkg/ingester/client/custom.go +++ b/pkg/ingester/client/custom.go @@ -37,7 +37,8 @@ func (m *QueryStreamResponse) ChunksSize() int { func (m *QueryStreamResponse) SamplesCount() (count int) { for _, cs := range m.Chunkseries { for _, c := range cs.Chunks { - if c.Encoding == int32(encoding.PrometheusXorChunk) { + switch c.Encoding { + case int32(encoding.PrometheusXorChunk), int32(encoding.PrometheusHistogramChunk), int32(encoding.PrometheusFloatHistogramChunk): count += int(binary.BigEndian.Uint16(c.Data)) } } diff --git a/pkg/ingester/client/custom_test.go b/pkg/ingester/client/custom_test.go new file mode 100644 index 0000000000..7213494ec2 --- /dev/null +++ b/pkg/ingester/client/custom_test.go @@ -0,0 +1,75 @@ +package client + +import ( + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/pkg/chunk/encoding" + "github.com/cortexproject/cortex/pkg/util" +) + +func TestSamplesCount(t *testing.T) { + floatChk := util.GenerateChunk(t, time.Second, model.Time(0), 100, encoding.PrometheusXorChunk) + histogramChk := util.GenerateChunk(t, time.Second, 
model.Time(0), 300, encoding.PrometheusHistogramChunk) + floatHistogramChk := util.GenerateChunk(t, time.Second, model.Time(0), 500, encoding.PrometheusFloatHistogramChunk) + for _, tc := range []struct { + name string + chunkSeries []TimeSeriesChunk + expectedCount int + }{ + { + name: "single float chunk", + chunkSeries: []TimeSeriesChunk{ + { + Chunks: []Chunk{ + {Encoding: int32(encoding.PrometheusXorChunk), Data: floatChk.Data.Bytes()}, + }, + }, + }, + expectedCount: 100, + }, + { + name: "single histogram chunk", + chunkSeries: []TimeSeriesChunk{ + { + Chunks: []Chunk{ + {Encoding: int32(encoding.PrometheusHistogramChunk), Data: histogramChk.Data.Bytes()}, + }, + }, + }, + expectedCount: 300, + }, + { + name: "single float histogram chunk", + chunkSeries: []TimeSeriesChunk{ + { + Chunks: []Chunk{ + {Encoding: int32(encoding.PrometheusFloatHistogramChunk), Data: floatHistogramChk.Data.Bytes()}, + }, + }, + }, + expectedCount: 500, + }, + { + name: "all chunks", + chunkSeries: []TimeSeriesChunk{ + { + Chunks: []Chunk{ + {Encoding: int32(encoding.PrometheusXorChunk), Data: floatChk.Data.Bytes()}, + {Encoding: int32(encoding.PrometheusHistogramChunk), Data: histogramChk.Data.Bytes()}, + {Encoding: int32(encoding.PrometheusFloatHistogramChunk), Data: floatHistogramChk.Data.Bytes()}, + }, + }, + }, + expectedCount: 900, + }, + } { + t.Run(tc.name, func(t *testing.T) { + c := (&QueryStreamResponse{Chunkseries: tc.chunkSeries}).SamplesCount() + require.Equal(t, tc.expectedCount, c) + }) + } +} diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 5c99f8f991..e067341049 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" @@ -66,6 +67,7 @@ const ( const ( errTSDBCreateIncompatibleState = "cannot create a new TSDB while the ingester is not in active state (current state: %s)" + errTSDBIngestWithTimestamp = "err: %v. series=%s" // Using error.Wrap puts the message before the error and if the series is too long, its truncated. errTSDBIngest = "err: %v. timestamp=%s, series=%s" // Using error.Wrap puts the message before the error and if the series is too long, its truncated. errTSDBIngestExemplar = "err: %v. timestamp=%s, series=%s, exemplar=%s" @@ -194,8 +196,10 @@ type Ingester struct { cfg Config - metrics *ingesterMetrics - logger log.Logger + metrics *ingesterMetrics + validateMetrics *validation.ValidateMetrics + + logger log.Logger lifecycler *ring.Lifecycler limits *validation.Overrides @@ -659,6 +663,7 @@ func New(cfg Config, limits *validation.Overrides, registerer prometheus.Registe i.ingestionRate, &i.inflightPushRequests, &i.maxInflightQueryRequests) + i.validateMetrics = validation.NewValidateMetrics(registerer) // Replace specific metrics which we can't directly track but we need to read // them from the underlying system (ie. TSDB). 
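The SamplesCount change exercised by this test now recognizes histogram and float-histogram chunks alongside XOR chunks. All three Prometheus chunkenc encodings store their sample count in the first two bytes of the chunk payload as a big-endian uint16, so the count can be read without decoding the chunk. A minimal sketch of that trick (the function name here is illustrative, not Cortex's):

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // samplesInChunk returns the sample count that Prometheus chunkenc
    // encodes in the first two bytes of a chunk's byte payload. This is
    // the same header SamplesCount reads for XOR, histogram and float
    // histogram chunks, so no chunk decoding is needed.
    func samplesInChunk(data []byte) int {
    	if len(data) < 2 {
    		return 0
    	}
    	return int(binary.BigEndian.Uint16(data))
    }

    func main() {
    	chunk := []byte{0x01, 0x2c}        // header for 300 samples, as in histogramChk above
    	fmt.Println(samplesInChunk(chunk)) // 300
    }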
@@ -943,7 +948,7 @@ func (i *Ingester) updateActiveSeries(ctx context.Context) { userDB.activeSeries.Purge(purgeTime) i.metrics.activeSeriesPerUser.WithLabelValues(userID).Set(float64(userDB.activeSeries.Active())) - if err := userDB.labelSetCounter.UpdateMetric(ctx, userDB, i.metrics.activeSeriesPerLabelSet); err != nil { + if err := userDB.labelSetCounter.UpdateMetric(ctx, userDB, i.metrics); err != nil { level.Warn(i.logger).Log("msg", "failed to update per labelSet metrics", "user", userID, "err", err) } } @@ -1078,6 +1083,65 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte firstPartialErr = errFn() } } + + handleAppendFailure = func(err error, timestampMs int64, lbls []cortexpb.LabelAdapter, copiedLabels labels.Labels) (rollback bool) { + // Check if the error is a soft error we can proceed on. If so, we keep track + // of it, so that we can return it back to the distributor, which will return a + // 400 error to the client. The client (Prometheus) will not retry on 400, and + // we actually ingested all samples which haven't failed. + switch cause := errors.Cause(err); { + case errors.Is(cause, storage.ErrOutOfBounds): + sampleOutOfBoundsCount++ + updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) + + case errors.Is(cause, storage.ErrOutOfOrderSample): + sampleOutOfOrderCount++ + updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) + + case errors.Is(cause, storage.ErrDuplicateSampleForTimestamp): + newValueForTimestampCount++ + updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) + + case errors.Is(cause, storage.ErrTooOldSample): + sampleTooOldCount++ + updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) + + case errors.Is(cause, errMaxSeriesPerUserLimitExceeded): + perUserSeriesLimitCount++ + updateFirstPartial(func() error { return makeLimitError(perUserSeriesLimit, i.limiter.FormatError(userID, cause)) }) + + case errors.Is(cause, errMaxSeriesPerMetricLimitExceeded): + perMetricSeriesLimitCount++ + updateFirstPartial(func() error { + return makeMetricLimitError(perMetricSeriesLimit, copiedLabels, i.limiter.FormatError(userID, cause)) + }) + + case errors.As(cause, &errMaxSeriesPerLabelSetLimitExceeded{}): + perLabelSetSeriesLimitCount++ + updateFirstPartial(func() error { + return makeMetricLimitError(perLabelsetSeriesLimit, copiedLabels, i.limiter.FormatError(userID, cause)) + }) + + case errors.Is(cause, histogram.ErrHistogramSpanNegativeOffset): + updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) + + case errors.Is(cause, histogram.ErrHistogramSpansBucketsMismatch): + updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) + + case errors.Is(cause, histogram.ErrHistogramNegativeBucketCount): + updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) + + case errors.Is(cause, histogram.ErrHistogramCountNotBigEnough): + updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) + + case errors.Is(cause, histogram.ErrHistogramCountMismatch): + updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) + + default: + rollback = true + } + return + } ) // Walk the samples, appending them to the users database @@ -1094,8 +1158,6 @@ func 
(i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte // To find out if any sample was added to this series, we keep old value. oldSucceededSamplesCount := succeededSamplesCount - nativeHistogramCount += len(ts.Histograms) - for _, s := range ts.Samples { var err error @@ -1119,50 +1181,9 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte failedSamplesCount++ - // Check if the error is a soft error we can proceed on. If so, we keep track - // of it, so that we can return it back to the distributor, which will return a - // 400 error to the client. The client (Prometheus) will not retry on 400, and - // we actually ingested all samples which haven't failed. - switch cause := errors.Cause(err); { - case errors.Is(cause, storage.ErrOutOfBounds): - sampleOutOfBoundsCount++ - updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(s.TimestampMs), ts.Labels) }) - continue - - case errors.Is(cause, storage.ErrOutOfOrderSample): - sampleOutOfOrderCount++ - updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(s.TimestampMs), ts.Labels) }) - continue - - case errors.Is(cause, storage.ErrDuplicateSampleForTimestamp): - newValueForTimestampCount++ - updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(s.TimestampMs), ts.Labels) }) - continue - - case errors.Is(cause, storage.ErrTooOldSample): - sampleTooOldCount++ - updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(s.TimestampMs), ts.Labels) }) - continue - - case errors.Is(cause, errMaxSeriesPerUserLimitExceeded): - perUserSeriesLimitCount++ - updateFirstPartial(func() error { return makeLimitError(perUserSeriesLimit, i.limiter.FormatError(userID, cause)) }) - continue - - case errors.Is(cause, errMaxSeriesPerMetricLimitExceeded): - perMetricSeriesLimitCount++ - updateFirstPartial(func() error { - return makeMetricLimitError(perMetricSeriesLimit, copiedLabels, i.limiter.FormatError(userID, cause)) - }) - continue - case errors.As(cause, &errMaxSeriesPerLabelSetLimitExceeded{}): - perLabelSetSeriesLimitCount++ - updateFirstPartial(func() error { - return makeMetricLimitError(perLabelsetSeriesLimit, copiedLabels, i.limiter.FormatError(userID, cause)) - }) + if rollback := handleAppendFailure(err, s.TimestampMs, ts.Labels, copiedLabels); !rollback { continue } - // The error looks an issue on our side, so we should rollback if rollbackErr := app.Rollback(); rollbackErr != nil { level.Warn(logutil.WithContext(ctx, i.logger)).Log("msg", "failed to rollback on error", "user", userID, "err", rollbackErr) @@ -1171,6 +1192,49 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte return nil, wrapWithUser(err, userID) } + if i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms { + for _, hp := range ts.Histograms { + var ( + err error + h *histogram.Histogram + fh *histogram.FloatHistogram + ) + + if hp.GetCountFloat() > 0 { + fh = cortexpb.FloatHistogramProtoToFloatHistogram(hp) + } else { + h = cortexpb.HistogramProtoToHistogram(hp) + } + + if ref != 0 { + if _, err = app.AppendHistogram(ref, copiedLabels, hp.TimestampMs, h, fh); err == nil { + succeededSamplesCount++ + continue + } + } else { + // Copy the label set because both TSDB and the active series tracker may retain it. 
+ copiedLabels = cortexpb.FromLabelAdaptersToLabelsWithCopy(ts.Labels) + if ref, err = app.AppendHistogram(0, copiedLabels, hp.TimestampMs, h, fh); err == nil { + succeededSamplesCount++ + continue + } + } + + failedSamplesCount++ + + if rollback := handleAppendFailure(err, hp.TimestampMs, ts.Labels, copiedLabels); !rollback { + continue + } + // The error looks an issue on our side, so we should rollback + if rollbackErr := app.Rollback(); rollbackErr != nil { + level.Warn(logutil.WithContext(ctx, i.logger)).Log("msg", "failed to rollback on error", "user", userID, "err", rollbackErr) + } + return nil, wrapWithUser(err, userID) + } + } else { + nativeHistogramCount += len(ts.Histograms) + } + if i.cfg.ActiveSeriesMetricsEnabled && succeededSamplesCount > oldSucceededSamplesCount { db.activeSeries.UpdateSeries(tsLabels, tsLabelsHash, startAppend, func(l labels.Labels) labels.Labels { // we must already have copied the labels if succeededSamplesCount has been incremented. @@ -1235,32 +1299,32 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte i.metrics.ingestedExemplarsFail.Add(float64(failedExemplarsCount)) if sampleOutOfBoundsCount > 0 { - validation.DiscardedSamples.WithLabelValues(sampleOutOfBounds, userID).Add(float64(sampleOutOfBoundsCount)) + i.validateMetrics.DiscardedSamples.WithLabelValues(sampleOutOfBounds, userID).Add(float64(sampleOutOfBoundsCount)) } if sampleOutOfOrderCount > 0 { - validation.DiscardedSamples.WithLabelValues(sampleOutOfOrder, userID).Add(float64(sampleOutOfOrderCount)) + i.validateMetrics.DiscardedSamples.WithLabelValues(sampleOutOfOrder, userID).Add(float64(sampleOutOfOrderCount)) } if sampleTooOldCount > 0 { - validation.DiscardedSamples.WithLabelValues(sampleTooOld, userID).Add(float64(sampleTooOldCount)) + i.validateMetrics.DiscardedSamples.WithLabelValues(sampleTooOld, userID).Add(float64(sampleTooOldCount)) } if newValueForTimestampCount > 0 { - validation.DiscardedSamples.WithLabelValues(newValueForTimestamp, userID).Add(float64(newValueForTimestampCount)) + i.validateMetrics.DiscardedSamples.WithLabelValues(newValueForTimestamp, userID).Add(float64(newValueForTimestampCount)) } if perUserSeriesLimitCount > 0 { - validation.DiscardedSamples.WithLabelValues(perUserSeriesLimit, userID).Add(float64(perUserSeriesLimitCount)) + i.validateMetrics.DiscardedSamples.WithLabelValues(perUserSeriesLimit, userID).Add(float64(perUserSeriesLimitCount)) } if perMetricSeriesLimitCount > 0 { - validation.DiscardedSamples.WithLabelValues(perMetricSeriesLimit, userID).Add(float64(perMetricSeriesLimitCount)) + i.validateMetrics.DiscardedSamples.WithLabelValues(perMetricSeriesLimit, userID).Add(float64(perMetricSeriesLimitCount)) } if perLabelSetSeriesLimitCount > 0 { - validation.DiscardedSamples.WithLabelValues(perLabelsetSeriesLimit, userID).Add(float64(perLabelSetSeriesLimitCount)) + i.validateMetrics.DiscardedSamples.WithLabelValues(perLabelsetSeriesLimit, userID).Add(float64(perLabelSetSeriesLimitCount)) } - if nativeHistogramCount > 0 { - validation.DiscardedSamples.WithLabelValues(nativeHistogramSample, userID).Add(float64(nativeHistogramCount)) + if !i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms && nativeHistogramCount > 0 { + i.validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramSample, userID).Add(float64(nativeHistogramCount)) } - // Distributor counts both samples and metadata, so for consistency ingester does the same. 
+ // Distributor counts both samples, metadata and histograms, so for consistency ingester does the same. i.ingestionRate.Add(int64(succeededSamplesCount + ingestedMetadata)) switch req.Source { @@ -1849,6 +1913,10 @@ func (i *Ingester) queryStreamChunks(ctx context.Context, db *userTSDB, from, th switch meta.Chunk.Encoding() { case chunkenc.EncXOR: ch.Encoding = int32(encoding.PrometheusXorChunk) + case chunkenc.EncHistogram: + ch.Encoding = int32(encoding.PrometheusHistogramChunk) + case chunkenc.EncFloatHistogram: + ch.Encoding = int32(encoding.PrometheusFloatHistogramChunk) default: return 0, 0, 0, errors.Errorf("unknown chunk encoding from TSDB chunk querier: %v", meta.Chunk.Encoding()) } @@ -2014,6 +2082,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { OutOfOrderTimeWindow: time.Duration(oooTimeWindow).Milliseconds(), OutOfOrderCapMax: i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMax, EnableOverlappingCompaction: false, // Always let compactors handle overlapped blocks, e.g. OOO blocks. + EnableNativeHistograms: i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms, }, nil) if err != nil { return nil, errors.Wrapf(err, "failed to open TSDB: %s", udir) @@ -2529,7 +2598,7 @@ func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckRes i.deleteUserMetadata(userID) i.metrics.deletePerUserMetrics(userID) - validation.DeletePerUserValidationMetrics(userID, i.logger) + validation.DeletePerUserValidationMetrics(i.validateMetrics, userID, i.logger) // And delete local data. if err := os.RemoveAll(dir); err != nil { @@ -2603,7 +2672,7 @@ func (i *Ingester) getOrCreateUserMetadata(userID string) *userMetricsMetadata { // Ensure it was not created between switching locks. userMetadata, ok := i.usersMetadata[userID] if !ok { - userMetadata = newMetadataMap(i.limiter, i.metrics, userID) + userMetadata = newMetadataMap(i.limiter, i.metrics, i.validateMetrics, userID) i.usersMetadata[userID] = userMetadata } return userMetadata @@ -2791,7 +2860,12 @@ func wrappedTSDBIngestErr(ingestErr error, timestamp model.Time, labels []cortex return nil } - return fmt.Errorf(errTSDBIngest, ingestErr, timestamp.Time().UTC().Format(time.RFC3339Nano), cortexpb.FromLabelAdaptersToLabels(labels).String()) + switch { + case errors.Is(ingestErr, storage.ErrDuplicateSampleForTimestamp): + return fmt.Errorf(errTSDBIngestWithTimestamp, ingestErr, cortexpb.FromLabelAdaptersToLabels(labels).String()) + default: + return fmt.Errorf(errTSDBIngest, ingestErr, timestamp.Time().UTC().Format(time.RFC3339Nano), cortexpb.FromLabelAdaptersToLabels(labels).String()) + } } func wrappedTSDBIngestExemplarErr(ingestErr error, timestamp model.Time, seriesLabels, exemplarLabels []cortexpb.LabelAdapter) error { diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 98d876461b..6da1a940e8 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -5,7 +5,6 @@ import ( "context" "encoding/json" "fmt" - "io" "math" "net" @@ -27,6 +26,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" @@ -42,8 +42,6 @@ import ( "golang.org/x/sync/errgroup" "google.golang.org/grpc" - "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - 
"github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/encoding" "github.com/cortexproject/cortex/pkg/cortexpb" @@ -52,13 +50,23 @@ import ( "github.com/cortexproject/cortex/pkg/querier/series" "github.com/cortexproject/cortex/pkg/ring" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/chunkcompat" + histogram_util "github.com/cortexproject/cortex/pkg/util/histogram" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" "github.com/cortexproject/cortex/pkg/util/validation" ) +var ( + encodings = []encoding.Encoding{ + encoding.PrometheusXorChunk, + encoding.PrometheusHistogramChunk, + encoding.PrometheusFloatHistogramChunk, + } +) + func runTestQuery(ctx context.Context, t *testing.T, ing *Ingester, ty labels.MatchType, n, v string) (model.Matrix, *client.QueryRequest, error) { return runTestQueryTimes(ctx, t, ing, ty, n, v, model.Earliest, model.Latest) } @@ -113,18 +121,22 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { userID := "1" registry := prometheus.NewRegistry() - limits.MaxSeriesPerLabelSet = []validation.MaxSeriesPerLabelSet{ + limits.LimitsPerLabelSet = []validation.LimitsPerLabelSet{ { LabelSet: labels.FromMap(map[string]string{ "label1": "value1", }), - Limit: 3, + Limits: validation.LimitsPerLabelSetEntry{ + MaxSeries: 3, + }, }, { LabelSet: labels.FromMap(map[string]string{ "label2": "value2", }), - Limit: 2, + Limits: validation.LimitsPerLabelSetEntry{ + MaxSeries: 2, + }, }, } tenantLimits := newMockTenantLimits(map[string]*validation.Limits{userID: &limits}) @@ -139,8 +151,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { require.NoError(t, os.Mkdir(chunksDir, os.ModePerm)) require.NoError(t, os.Mkdir(blocksDir, os.ModePerm)) - ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry) - registry.MustRegister(validation.DiscardedSamples) + ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry, true) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE @@ -152,12 +163,12 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { samples := []cortexpb.Sample{{Value: 2, TimestampMs: 10}} // Create first series within the limits - for _, set := range limits.MaxSeriesPerLabelSet { + for _, set := range limits.LimitsPerLabelSet { lbls := []string{labels.MetricName, "metric_name"} for _, lbl := range set.LabelSet { lbls = append(lbls, lbl.Name, lbl.Value) } - for i := 0; i < set.Limit; i++ { + for i := 0; i < set.Limits.MaxSeries; i++ { _, err = ing.Push(ctx, cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API)) require.NoError(t, err) @@ -166,14 +177,18 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { ing.updateActiveSeries(ctx) require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(` - # HELP cortex_ingester_active_series_per_labelset Number of currently active series per user and labelset. 
- # TYPE cortex_ingester_active_series_per_labelset gauge - cortex_ingester_active_series_per_labelset{labelset="{label1=\"value1\"}",user="1"} 3 - cortex_ingester_active_series_per_labelset{labelset="{label2=\"value2\"}",user="1"} 2 - `), "cortex_ingester_active_series_per_labelset", "cortex_discarded_samples_total")) + # HELP cortex_ingester_limits_per_labelset Limits per user and labelset. + # TYPE cortex_ingester_limits_per_labelset gauge + cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + # HELP cortex_ingester_usage_per_labelset Current usage per user and labelset. + # TYPE cortex_ingester_usage_per_labelset gauge + cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset", "cortex_discarded_samples_total")) // Should impose limits - for _, set := range limits.MaxSeriesPerLabelSet { + for _, set := range limits.LimitsPerLabelSet { lbls := []string{labels.MetricName, "metric_name"} for _, lbl := range set.LabelSet { lbls = append(lbls, lbl.Name, lbl.Value) @@ -191,29 +206,39 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { # HELP cortex_discarded_samples_total The total number of samples that were discarded. # TYPE cortex_discarded_samples_total counter cortex_discarded_samples_total{reason="per_labelset_series_limit",user="1"} 2 - # HELP cortex_ingester_active_series_per_labelset Number of currently active series per user and labelset. - # TYPE cortex_ingester_active_series_per_labelset gauge - cortex_ingester_active_series_per_labelset{labelset="{label1=\"value1\"}",user="1"} 3 - cortex_ingester_active_series_per_labelset{labelset="{label2=\"value2\"}",user="1"} 2 - `), "cortex_ingester_active_series_per_labelset", "cortex_discarded_samples_total")) + # HELP cortex_ingester_limits_per_labelset Limits per user and labelset. + # TYPE cortex_ingester_limits_per_labelset gauge + cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + # HELP cortex_ingester_usage_per_labelset Current usage per user and labelset. 
+ # TYPE cortex_ingester_usage_per_labelset gauge + cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset", "cortex_discarded_samples_total")) // Should apply composite limits - limits.MaxSeriesPerLabelSet = append(limits.MaxSeriesPerLabelSet, - validation.MaxSeriesPerLabelSet{LabelSet: labels.FromMap(map[string]string{ + limits.LimitsPerLabelSet = append(limits.LimitsPerLabelSet, + validation.LimitsPerLabelSet{LabelSet: labels.FromMap(map[string]string{ "comp1": "compValue1", }), - Limit: 10, + Limits: validation.LimitsPerLabelSetEntry{ + MaxSeries: 10, + }, }, - validation.MaxSeriesPerLabelSet{LabelSet: labels.FromMap(map[string]string{ + validation.LimitsPerLabelSet{LabelSet: labels.FromMap(map[string]string{ "comp2": "compValue2", }), - Limit: 10, + Limits: validation.LimitsPerLabelSetEntry{ + MaxSeries: 10, + }, }, - validation.MaxSeriesPerLabelSet{LabelSet: labels.FromMap(map[string]string{ + validation.LimitsPerLabelSet{LabelSet: labels.FromMap(map[string]string{ "comp1": "compValue1", "comp2": "compValue2", }), - Limit: 2, + Limits: validation.LimitsPerLabelSetEntry{ + MaxSeries: 2, + }, }, ) @@ -228,14 +253,21 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { # HELP cortex_discarded_samples_total The total number of samples that were discarded. # TYPE cortex_discarded_samples_total counter cortex_discarded_samples_total{reason="per_labelset_series_limit",user="1"} 2 - # HELP cortex_ingester_active_series_per_labelset Number of currently active series per user and labelset. - # TYPE cortex_ingester_active_series_per_labelset gauge - cortex_ingester_active_series_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",user="1"} 0 - cortex_ingester_active_series_per_labelset{labelset="{comp1=\"compValue1\"}",user="1"} 0 - cortex_ingester_active_series_per_labelset{labelset="{comp2=\"compValue2\"}",user="1"} 0 - cortex_ingester_active_series_per_labelset{labelset="{label1=\"value1\"}",user="1"} 3 - cortex_ingester_active_series_per_labelset{labelset="{label2=\"value2\"}",user="1"} 2 - `), "cortex_ingester_active_series_per_labelset", "cortex_discarded_samples_total")) + # HELP cortex_ingester_limits_per_labelset Limits per user and labelset. + # TYPE cortex_ingester_limits_per_labelset gauge + cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 2 + cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 10 + cortex_ingester_limits_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 10 + cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + # HELP cortex_ingester_usage_per_labelset Current usage per user and labelset. 
+ # TYPE cortex_ingester_usage_per_labelset gauge + cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 0 + cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 0 + cortex_ingester_usage_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 0 + cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset", "cortex_discarded_samples_total")) // Adding 5 metrics with only 1 label for i := 0; i < 5; i++ { @@ -266,22 +298,31 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { # HELP cortex_discarded_samples_total The total number of samples that were discarded. # TYPE cortex_discarded_samples_total counter cortex_discarded_samples_total{reason="per_labelset_series_limit",user="1"} 3 - # HELP cortex_ingester_active_series_per_labelset Number of currently active series per user and labelset. - # TYPE cortex_ingester_active_series_per_labelset gauge - cortex_ingester_active_series_per_labelset{labelset="{label1=\"value1\"}",user="1"} 3 - cortex_ingester_active_series_per_labelset{labelset="{label2=\"value2\"}",user="1"} 2 - cortex_ingester_active_series_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",user="1"} 2 - cortex_ingester_active_series_per_labelset{labelset="{comp1=\"compValue1\"}",user="1"} 7 - cortex_ingester_active_series_per_labelset{labelset="{comp2=\"compValue2\"}",user="1"} 2 - `), "cortex_ingester_active_series_per_labelset", "cortex_discarded_samples_total")) + # HELP cortex_ingester_limits_per_labelset Limits per user and labelset. + # TYPE cortex_ingester_limits_per_labelset gauge + cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 2 + cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 10 + cortex_ingester_limits_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 10 + cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + # HELP cortex_ingester_usage_per_labelset Current usage per user and labelset. 
+ # TYPE cortex_ingester_usage_per_labelset gauge + cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 2 + cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 7 + cortex_ingester_usage_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 2 + `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset", "cortex_discarded_samples_total")) // Should bootstrap and apply limits when configuration change - limits.MaxSeriesPerLabelSet = append(limits.MaxSeriesPerLabelSet, - validation.MaxSeriesPerLabelSet{LabelSet: labels.FromMap(map[string]string{ + limits.LimitsPerLabelSet = append(limits.LimitsPerLabelSet, + validation.LimitsPerLabelSet{LabelSet: labels.FromMap(map[string]string{ labels.MetricName: "metric_name", "comp2": "compValue2", }), - Limit: 3, // we already have 2 so we need to allow 1 more + Limits: validation.LimitsPerLabelSetEntry{ + MaxSeries: 3, // we already have 2 so we need to allow 1 more + }, }, ) @@ -304,43 +345,59 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { ing.updateActiveSeries(ctx) require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(` - # HELP cortex_ingester_active_series_per_labelset Number of currently active series per user and labelset. - # TYPE cortex_ingester_active_series_per_labelset gauge - cortex_ingester_active_series_per_labelset{labelset="{__name__=\"metric_name\", comp2=\"compValue2\"}",user="1"} 3 - cortex_ingester_active_series_per_labelset{labelset="{label1=\"value1\"}",user="1"} 3 - cortex_ingester_active_series_per_labelset{labelset="{label2=\"value2\"}",user="1"} 2 - cortex_ingester_active_series_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",user="1"} 2 - cortex_ingester_active_series_per_labelset{labelset="{comp1=\"compValue1\"}",user="1"} 7 - cortex_ingester_active_series_per_labelset{labelset="{comp2=\"compValue2\"}",user="1"} 3 - `), "cortex_ingester_active_series_per_labelset")) + # HELP cortex_ingester_limits_per_labelset Limits per user and labelset. + # TYPE cortex_ingester_limits_per_labelset gauge + cortex_ingester_limits_per_labelset{labelset="{__name__=\"metric_name\", comp2=\"compValue2\"}",limit="max_series",user="1"} 3 + cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 2 + cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 10 + cortex_ingester_limits_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 10 + cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + # HELP cortex_ingester_usage_per_labelset Current usage per user and labelset. 
+ # TYPE cortex_ingester_usage_per_labelset gauge + cortex_ingester_usage_per_labelset{labelset="{__name__=\"metric_name\", comp2=\"compValue2\"}",limit="max_series",user="1"} 3 + cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 2 + cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 7 + cortex_ingester_usage_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 3 + `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset")) // Should remove metrics when the limits is removed - limits.MaxSeriesPerLabelSet = limits.MaxSeriesPerLabelSet[:2] + limits.LimitsPerLabelSet = limits.LimitsPerLabelSet[:2] b, err = json.Marshal(limits) require.NoError(t, err) require.NoError(t, limits.UnmarshalJSON(b)) tenantLimits.setLimits(userID, &limits) ing.updateActiveSeries(ctx) require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(` - # HELP cortex_ingester_active_series_per_labelset Number of currently active series per user and labelset. - # TYPE cortex_ingester_active_series_per_labelset gauge - cortex_ingester_active_series_per_labelset{labelset="{label1=\"value1\"}",user="1"} 3 - cortex_ingester_active_series_per_labelset{labelset="{label2=\"value2\"}",user="1"} 2 - `), "cortex_ingester_active_series_per_labelset")) + # HELP cortex_ingester_limits_per_labelset Limits per user and labelset. + # TYPE cortex_ingester_limits_per_labelset gauge + cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + # HELP cortex_ingester_usage_per_labelset Current usage per user and labelset. + # TYPE cortex_ingester_usage_per_labelset gauge + cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset")) // Should persist between restarts services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck registry = prometheus.NewRegistry() - ing, err = prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry) + ing, err = prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry, true) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) ing.updateActiveSeries(ctx) require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(` - # HELP cortex_ingester_active_series_per_labelset Number of currently active series per user and labelset. - # TYPE cortex_ingester_active_series_per_labelset gauge - cortex_ingester_active_series_per_labelset{labelset="{label1=\"value1\"}",user="1"} 3 - cortex_ingester_active_series_per_labelset{labelset="{label2=\"value2\"}",user="1"} 2 - `), "cortex_ingester_active_series_per_labelset")) + # HELP cortex_ingester_limits_per_labelset Limits per user and labelset. 
+ # TYPE cortex_ingester_limits_per_labelset gauge + cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + # HELP cortex_ingester_usage_per_labelset Current usage per user and labelset. + # TYPE cortex_ingester_usage_per_labelset gauge + cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 + cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 + `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset")) services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck } @@ -358,7 +415,7 @@ func TestIngesterUserLimitExceeded(t *testing.T) { require.NoError(t, os.Mkdir(blocksDir, os.ModePerm)) blocksIngesterGenerator := func() *Ingester { - ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, nil) + ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, prometheus.NewRegistry(), true) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE @@ -480,7 +537,7 @@ func TestIngesterMetricLimitExceeded(t *testing.T) { require.NoError(t, os.Mkdir(blocksDir, os.ModePerm)) blocksIngesterGenerator := func() *Ingester { - ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, nil) + ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, prometheus.NewRegistry(), true) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE @@ -601,6 +658,8 @@ func TestIngester_Push(t *testing.T) { } userID := "test" + testHistogram := cortexpb.HistogramToHistogramProto(10, histogram_util.GenerateTestHistogram(1)) + testFloatHistogram := cortexpb.FloatHistogramToHistogramProto(11, histogram_util.GenerateTestFloatHistogram(1)) tests := map[string]struct { reqs []*cortexpb.WriteRequest expectedErr error @@ -612,8 +671,9 @@ func TestIngester_Push(t *testing.T) { disableActiveSeries bool maxExemplars int oooTimeWindow time.Duration + disableNativeHistogram bool }{ - "should record native histogram": { + "should record native histogram discarded": { reqs: []*cortexpb.WriteRequest{ cortexpb.ToWriteRequest( []labels.Labels{metricLabels}, @@ -635,7 +695,8 @@ func TestIngester_Push(t *testing.T) { expectedMetadataIngested: []*cortexpb.MetricMetadata{ {MetricFamilyName: "metric_name_2", Help: "a help for metric_name_2", Unit: "", Type: cortexpb.GAUGE}, }, - additionalMetrics: []string{}, + additionalMetrics: []string{}, + disableNativeHistogram: true, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. 
# TYPE cortex_ingester_ingested_samples_total counter @@ -891,23 +952,36 @@ func TestIngester_Push(t *testing.T) { []labels.Labels{metricLabels}, []cortexpb.Sample{{Value: 1, TimestampMs: 9}}, nil, - nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(9, histogram_util.GenerateTestHistogram(1)), + }, cortexpb.API), }, expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrOutOfOrderSample, model.Time(9), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()), expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 10}}}, }, + additionalMetrics: []string{ + "cortex_ingester_tsdb_out_of_order_samples_total", + "cortex_ingester_tsdb_head_out_of_order_samples_appended_total", + }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter cortex_ingester_ingested_samples_total 1 # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion. # TYPE cortex_ingester_ingested_samples_failures_total counter - cortex_ingester_ingested_samples_failures_total 1 + cortex_ingester_ingested_samples_failures_total 2 # HELP cortex_ingester_memory_users The current number of users in memory. # TYPE cortex_ingester_memory_users gauge cortex_ingester_memory_users 1 + # HELP cortex_ingester_tsdb_head_out_of_order_samples_appended_total Total number of appended out of order samples. + # TYPE cortex_ingester_tsdb_head_out_of_order_samples_appended_total counter + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="float",user="test"} 0 + # HELP cortex_ingester_tsdb_out_of_order_samples_total Total number of out of order samples ingestion failed attempts due to out of order being disabled. + # TYPE cortex_ingester_tsdb_out_of_order_samples_total counter + cortex_ingester_tsdb_out_of_order_samples_total{type="float",user="test"} 1 + cortex_ingester_tsdb_out_of_order_samples_total{type="histogram",user="test"} 1 # HELP cortex_ingester_memory_series The current number of series in memory. # TYPE cortex_ingester_memory_series gauge cortex_ingester_memory_series 1 @@ -919,7 +993,7 @@ func TestIngester_Push(t *testing.T) { cortex_ingester_memory_series_removed_total{user="test"} 0 # HELP cortex_discarded_samples_total The total number of samples that were discarded. # TYPE cortex_discarded_samples_total counter - cortex_discarded_samples_total{reason="sample-out-of-order",user="test"} 1 + cortex_discarded_samples_total{reason="sample-out-of-order",user="test"} 2 # HELP cortex_ingester_active_series Number of currently active series per user. 
# TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 @@ -937,7 +1011,9 @@ func TestIngester_Push(t *testing.T) { []labels.Labels{metricLabels}, []cortexpb.Sample{{Value: 1, TimestampMs: 1575043969 - (86400 * 1000)}}, nil, - nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(1575043969-(86400*1000), histogram_util.GenerateTestHistogram(1)), + }, cortexpb.API), }, expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrOutOfBounds, model.Time(1575043969-(86400*1000)), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()), @@ -950,7 +1026,7 @@ func TestIngester_Push(t *testing.T) { cortex_ingester_ingested_samples_total 1 # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion. # TYPE cortex_ingester_ingested_samples_failures_total counter - cortex_ingester_ingested_samples_failures_total 1 + cortex_ingester_ingested_samples_failures_total 2 # HELP cortex_ingester_memory_users The current number of users in memory. # TYPE cortex_ingester_memory_users gauge cortex_ingester_memory_users 1 @@ -965,7 +1041,7 @@ func TestIngester_Push(t *testing.T) { cortex_ingester_memory_series_removed_total{user="test"} 0 # HELP cortex_discarded_samples_total The total number of samples that were discarded. # TYPE cortex_discarded_samples_total counter - cortex_discarded_samples_total{reason="sample-out-of-bounds",user="test"} 1 + cortex_discarded_samples_total{reason="sample-out-of-bounds",user="test"} 2 # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 @@ -1076,7 +1152,7 @@ func TestIngester_Push(t *testing.T) { nil, cortexpb.API), }, - expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrDuplicateSampleForTimestamp, model.Time(1575043969), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()), + expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.NewDuplicateFloatErr(1575043969, 2, 1), model.Time(1575043969), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()), expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}}, }, @@ -1183,15 +1259,161 @@ func TestIngester_Push(t *testing.T) { cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total 0 `, }, + "should succeed when only native histogram present if enabled": { + reqs: []*cortexpb.WriteRequest{ + cortexpb.ToWriteRequest( + []labels.Labels{metricLabels}, + nil, + nil, + []cortexpb.Histogram{testHistogram}, + cortexpb.API), + }, + expectedErr: nil, + expectedIngested: []cortexpb.TimeSeries{ + {Labels: metricLabelAdapters, Histograms: []cortexpb.Histogram{testHistogram}}, + }, + additionalMetrics: []string{ + "cortex_ingester_tsdb_head_samples_appended_total", + }, + expectedMetrics: ` + # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. + # TYPE cortex_ingester_ingested_samples_total counter + cortex_ingester_ingested_samples_total 1 + # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion. + # TYPE cortex_ingester_ingested_samples_failures_total counter + cortex_ingester_ingested_samples_failures_total 0 + # HELP cortex_ingester_memory_users The current number of users in memory. 
+ # TYPE cortex_ingester_memory_users gauge + cortex_ingester_memory_users 1 + # HELP cortex_ingester_tsdb_head_out_of_order_samples_appended_total Total number of appended out of order samples. + # TYPE cortex_ingester_tsdb_head_out_of_order_samples_appended_total counter + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="float",user="test"} 0 + # HELP cortex_ingester_tsdb_head_samples_appended_total Total number of appended samples. + # TYPE cortex_ingester_tsdb_head_samples_appended_total counter + cortex_ingester_tsdb_head_samples_appended_total{type="float",user="test"} 0 + cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="test"} 1 + # HELP cortex_ingester_memory_series The current number of series in memory. + # TYPE cortex_ingester_memory_series gauge + cortex_ingester_memory_series 1 + # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user. + # TYPE cortex_ingester_memory_series_created_total counter + cortex_ingester_memory_series_created_total{user="test"} 1 + # HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user. + # TYPE cortex_ingester_memory_series_removed_total counter + cortex_ingester_memory_series_removed_total{user="test"} 0 + # HELP cortex_discarded_samples_total The total number of samples that were discarded. + # TYPE cortex_discarded_samples_total counter + # HELP cortex_ingester_active_series Number of currently active series per user. + # TYPE cortex_ingester_active_series gauge + cortex_ingester_active_series{user="test"} 1 + `, + }, + "should succeed when only float native histogram present if enabled": { + reqs: []*cortexpb.WriteRequest{ + cortexpb.ToWriteRequest( + []labels.Labels{metricLabels}, + nil, + nil, + []cortexpb.Histogram{testFloatHistogram}, + cortexpb.API), + }, + expectedErr: nil, + expectedIngested: []cortexpb.TimeSeries{ + {Labels: metricLabelAdapters, Histograms: []cortexpb.Histogram{testFloatHistogram}}, + }, + additionalMetrics: []string{ + "cortex_ingester_tsdb_head_samples_appended_total", + }, + expectedMetrics: ` + # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. + # TYPE cortex_ingester_ingested_samples_total counter + cortex_ingester_ingested_samples_total 1 + # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion. + # TYPE cortex_ingester_ingested_samples_failures_total counter + cortex_ingester_ingested_samples_failures_total 0 + # HELP cortex_ingester_memory_users The current number of users in memory. + # TYPE cortex_ingester_memory_users gauge + cortex_ingester_memory_users 1 + # HELP cortex_ingester_tsdb_head_out_of_order_samples_appended_total Total number of appended out of order samples. + # TYPE cortex_ingester_tsdb_head_out_of_order_samples_appended_total counter + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="float",user="test"} 0 + # HELP cortex_ingester_tsdb_head_samples_appended_total Total number of appended samples. + # TYPE cortex_ingester_tsdb_head_samples_appended_total counter + cortex_ingester_tsdb_head_samples_appended_total{type="float",user="test"} 0 + cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="test"} 1 + # HELP cortex_ingester_memory_series The current number of series in memory. 
+ # TYPE cortex_ingester_memory_series gauge + cortex_ingester_memory_series 1 + # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user. + # TYPE cortex_ingester_memory_series_created_total counter + cortex_ingester_memory_series_created_total{user="test"} 1 + # HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user. + # TYPE cortex_ingester_memory_series_removed_total counter + cortex_ingester_memory_series_removed_total{user="test"} 0 + # HELP cortex_discarded_samples_total The total number of samples that were discarded. + # TYPE cortex_discarded_samples_total counter + # HELP cortex_ingester_active_series Number of currently active series per user. + # TYPE cortex_ingester_active_series gauge + cortex_ingester_active_series{user="test"} 1 + `, + }, + "should fail to ingest histogram due to OOO native histogram. Sample and histogram has same timestamp but sample got ingested first": { + reqs: []*cortexpb.WriteRequest{ + cortexpb.ToWriteRequest( + []labels.Labels{metricLabels}, + []cortexpb.Sample{{Value: 2, TimestampMs: 10}}, + nil, + []cortexpb.Histogram{testHistogram}, + cortexpb.API), + }, + expectedErr: nil, + expectedIngested: []cortexpb.TimeSeries{ + {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 10}}}, + }, + additionalMetrics: []string{ + "cortex_ingester_tsdb_head_samples_appended_total", + "cortex_ingester_tsdb_out_of_order_samples_total", + }, + expectedMetrics: ` + # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. + # TYPE cortex_ingester_ingested_samples_total counter + cortex_ingester_ingested_samples_total 2 + # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion. + # TYPE cortex_ingester_ingested_samples_failures_total counter + cortex_ingester_ingested_samples_failures_total 0 + # HELP cortex_ingester_memory_users The current number of users in memory. + # TYPE cortex_ingester_memory_users gauge + cortex_ingester_memory_users 1 + # HELP cortex_ingester_tsdb_head_samples_appended_total Total number of appended samples. + # TYPE cortex_ingester_tsdb_head_samples_appended_total counter + cortex_ingester_tsdb_head_samples_appended_total{type="float",user="test"} 1 + cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="test"} 0 + # HELP cortex_ingester_tsdb_out_of_order_samples_total Total number of out of order samples ingestion failed attempts due to out of order being disabled. + # TYPE cortex_ingester_tsdb_out_of_order_samples_total counter + cortex_ingester_tsdb_out_of_order_samples_total{type="float",user="test"} 0 + cortex_ingester_tsdb_out_of_order_samples_total{type="histogram",user="test"} 1 + # HELP cortex_ingester_memory_series The current number of series in memory. + # TYPE cortex_ingester_memory_series gauge + cortex_ingester_memory_series 1 + # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user. + # TYPE cortex_ingester_memory_series_created_total counter + cortex_ingester_memory_series_created_total{user="test"} 1 + # HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user. + # TYPE cortex_ingester_memory_series_removed_total counter + cortex_ingester_memory_series_removed_total{user="test"} 0 + # HELP cortex_discarded_samples_total The total number of samples that were discarded. 
+ # TYPE cortex_discarded_samples_total counter + # HELP cortex_ingester_active_series Number of currently active series per user. + # TYPE cortex_ingester_active_series gauge + cortex_ingester_active_series{user="test"} 1 + `, + }, } for testName, testData := range tests { t.Run(testName, func(t *testing.T) { registry := prometheus.NewRegistry() - registry.MustRegister(validation.DiscardedSamples) - validation.DiscardedSamples.Reset() - // Create a mocked ingester cfg := defaultIngesterTestConfig(t) cfg.LifecyclerConfig.JoinAfter = 0 @@ -1200,7 +1422,7 @@ func TestIngester_Push(t *testing.T) { limits := defaultLimitsTestConfig() limits.MaxExemplars = testData.maxExemplars limits.OutOfOrderTimeWindow = model.Duration(testData.oooTimeWindow) - i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry) + i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry, !testData.disableNativeHistogram) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -1278,6 +1500,179 @@ func TestIngester_Push(t *testing.T) { } } +// Referred from https://github.com/prometheus/prometheus/blob/v2.52.1/model/histogram/histogram_test.go#L985. +func TestIngester_PushNativeHistogramErrors(t *testing.T) { + metricLabelAdapters := []cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}} + metricLabels := cortexpb.FromLabelAdaptersToLabels(metricLabelAdapters) + for _, tc := range []struct { + name string + histograms []cortexpb.Histogram + expectedErr error + }{ + { + name: "rejects histogram with NaN observations that has its Count (2) lower than the actual total of buckets (2 + 1)", + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + ZeroCount: 2, + Count: 2, + Sum: math.NaN(), + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + }), + }, + expectedErr: fmt.Errorf("3 observations found in buckets, but the Count field is 2: %w", histogram.ErrHistogramCountNotBigEnough), + }, + { + name: "rejects histogram without NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)", + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + ZeroCount: 2, + Count: 4, + Sum: 333, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + }), + }, + expectedErr: fmt.Errorf("3 observations found in buckets, but the Count field is 4: %w", histogram.ErrHistogramCountMismatch), + }, + { + name: "rejects histogram that has too few negative buckets", + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{}, + }), + }, + expectedErr: fmt.Errorf("negative side: spans need 1 buckets, have 0 buckets: %w", histogram.ErrHistogramSpansBucketsMismatch), + }, + { + name: "rejects histogram that has too few positive buckets", + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{}, + }), + }, + expectedErr: fmt.Errorf("positive side: spans need 1 buckets, have 0 buckets: %w", histogram.ErrHistogramSpansBucketsMismatch), + }, + { + name: "rejects histogram that has too many negative buckets", + histograms: 
[]cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{1, 2}, + }), + }, + expectedErr: fmt.Errorf("negative side: spans need 1 buckets, have 2 buckets: %w", histogram.ErrHistogramSpansBucketsMismatch), + }, + { + name: "rejects histogram that has too many positive buckets", + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1, 2}, + }), + }, + expectedErr: fmt.Errorf("positive side: spans need 1 buckets, have 2 buckets: %w", histogram.ErrHistogramSpansBucketsMismatch), + }, + { + name: "rejects a histogram that has a negative span with a negative offset", + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1, 2}, + }), + }, + expectedErr: fmt.Errorf("negative side: span number 2 with offset -1: %w", histogram.ErrHistogramSpanNegativeOffset), + }, + { + name: "rejects a histogram that has a positive span with a negative offset", + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, + PositiveBuckets: []int64{1, 2}, + }), + }, + expectedErr: fmt.Errorf("positive side: span number 2 with offset -1: %w", histogram.ErrHistogramSpanNegativeOffset), + }, + { + name: "rejects a histogram that has a negative span with a negative count", + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{-1}, + }), + }, + expectedErr: fmt.Errorf("negative side: bucket number 1 has observation count of -1: %w", histogram.ErrHistogramNegativeBucketCount), + }, + { + name: "rejects a histogram that has a positive span with a negative count", + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}}, + PositiveBuckets: []int64{-1}, + }), + }, + expectedErr: fmt.Errorf("positive side: bucket number 1 has observation count of -1: %w", histogram.ErrHistogramNegativeBucketCount), + }, + { + name: "rejects a histogram that has a lower count than count in buckets", + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + Count: 0, + NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, + PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1}, + PositiveBuckets: []int64{1}, + }), + }, + expectedErr: fmt.Errorf("2 observations found in buckets, but the Count field is 0: %w", histogram.ErrHistogramCountMismatch), + }, + { + name: "rejects a histogram that doesn't count the zero bucket in its count", + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ + Count: 2, + ZeroCount: 1, + NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, + PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1}, + PositiveBuckets: []int64{1}, + }), + }, + expectedErr: fmt.Errorf("3 observations found in buckets, but the Count field is 2: %w", histogram.ErrHistogramCountMismatch), + }, + } { + t.Run(tc.name, func(t *testing.T) { + 
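// Annotation (editor's sketch, not part of the original patch): the expected
// errors in the table above mirror Prometheus's native histogram validation,
// which sums ZeroCount plus every regular bucket and compares the total
// against Count. Paraphrasing the upstream check in hypothetical shorthand
// (sumOfAllBuckets is not a real helper), not its exact code:
//
//	total := h.ZeroCount + sumOfAllBuckets(h)
//	if math.IsNaN(h.Sum) {
//		// NaN observations land in no bucket, so buckets may undercount:
//		// only total > Count is an error (ErrHistogramCountNotBigEnough).
//	} else if total != h.Count {
//		// Without NaNs the totals must match exactly (ErrHistogramCountMismatch).
//	}
//
// This is why the first case (ZeroCount 2 + bucket 1 = 3 observations, Count 2,
// Sum NaN) yields the NotBigEnough variant, while the non-NaN cases yield the
// plain mismatch error.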
registry := prometheus.NewRegistry() + + // Create a mocked ingester + cfg := defaultIngesterTestConfig(t) + cfg.LifecyclerConfig.JoinAfter = 0 + + limits := defaultLimitsTestConfig() + i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry, true) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck + + ctx := user.InjectOrgID(context.Background(), userID) + + // Wait until the ingester is ACTIVE + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + return i.lifecycler.GetState() + }) + + req := cortexpb.ToWriteRequest([]labels.Labels{metricLabels}, nil, nil, tc.histograms, cortexpb.API) + // Push timeseries + _, err = i.Push(ctx, req) + assert.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(tc.expectedErr, model.Time(10), metricLabelAdapters), userID).Error()), err) + + require.Equal(t, testutil.ToFloat64(i.metrics.ingestedSamplesFail), float64(1)) + }) + } +} + func TestIngester_Push_ShouldCorrectlyTrackMetricsInMultiTenantScenario(t *testing.T) { metricLabelAdapters := []cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}} metricLabels := cortexpb.FromLabelAdaptersToLabels(metricLabelAdapters) @@ -1752,7 +2147,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { return instanceLimits } - ingester, err := prepareIngesterWithBlocksStorageAndLimits(b, cfg, limits, nil, "", registry) + ingester, err := prepareIngesterWithBlocksStorageAndLimits(b, cfg, limits, nil, "", registry, true) require.NoError(b, err) require.NoError(b, services.StartAndAwaitRunning(context.Background(), ingester)) defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck @@ -1812,7 +2207,7 @@ func Test_Ingester_LabelNames(t *testing.T) { expected := []string{"__name__", "status", "route"} // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), nil) + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -1856,7 +2251,7 @@ func Test_Ingester_LabelValues(t *testing.T) { } // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), nil) + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -1975,7 +2370,7 @@ func Test_Ingester_Query(t *testing.T) { } // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), nil) + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -2015,7 +2410,7 @@ func Test_Ingester_Query(t *testing.T) { } } func TestIngester_Query_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) { - i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), nil) + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), 
prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -2039,7 +2434,7 @@ func TestIngester_Query_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) { } func TestIngester_LabelValues_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) { - i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), nil) + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -2059,7 +2454,7 @@ func TestIngester_LabelValues_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) { } func TestIngester_LabelNames_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) { - i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), nil) + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -2084,7 +2479,7 @@ func TestIngester_Push_ShouldNotCreateTSDBIfNotInActiveState(t *testing.T) { cfg := defaultIngesterTestConfig(t) cfg.LifecyclerConfig.JoinAfter = 10 * time.Second - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -2132,7 +2527,7 @@ func TestIngester_getOrCreateTSDB_ShouldNotAllowToCreateTSDBIfIngesterStateIsNot cfg := defaultIngesterTestConfig(t) cfg.LifecyclerConfig.JoinAfter = 60 * time.Second - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -2292,7 +2687,7 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { } // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), nil) + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -2397,7 +2792,7 @@ func createIngesterWithSeries(t testing.TB, userID string, numSeries, numSamples const maxBatchSize = 1000 // Create ingester. - i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), nil) + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) t.Cleanup(func() { @@ -2445,77 +2840,92 @@ func TestIngester_QueryStream(t *testing.T) { // Create ingester. 
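// Annotation (editor's note, not part of the patch): the hunk beginning here
// rewrites TestIngester_QueryStream so the original body runs as one subtest
// per chunk encoding. A condensed sketch of the new shape, using only names
// that appear in this diff:
//
//	for _, enc := range encodings {
//		t.Run(enc.String(), func(t *testing.T) {
//			switch enc {
//			case encoding.PrometheusXorChunk:
//				req, expectedResponseChunks = mockWriteRequest(t, lbls, 123000, 456)
//			case encoding.PrometheusHistogramChunk:
//				req, expectedResponseChunks = mockHistogramWriteRequest(t, lbls, 123000, 456, false)
//			case encoding.PrometheusFloatHistogramChunk:
//				req, expectedResponseChunks = mockHistogramWriteRequest(t, lbls, 123000, 456, true)
//			}
//			// push, query back over gRPC streaming, and compare against the
//			// expected chunks for that encoding
//		})
//	}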
cfg := defaultIngesterTestConfig(t) - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) - require.NoError(t, err) - require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) - defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck - - // Wait until it's ACTIVE. - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { - return i.lifecycler.GetState() - }) + for _, enc := range encodings { + t.Run(enc.String(), func(t *testing.T) { + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck - // Push series. - ctx := user.InjectOrgID(context.Background(), userID) - lbls := labels.Labels{{Name: labels.MetricName, Value: "foo"}} - req, expectedResponseChunks := mockWriteRequest(t, lbls, 123000, 456) - _, err = i.Push(ctx, req) - require.NoError(t, err) + // Wait until it's ACTIVE. + test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + return i.lifecycler.GetState() + }) - // Create a GRPC server used to query back the data. - serv := grpc.NewServer(grpc.StreamInterceptor(middleware.StreamServerUserHeaderInterceptor)) - defer serv.GracefulStop() - client.RegisterIngesterServer(serv, i) + // Push series. + ctx := user.InjectOrgID(context.Background(), userID) + lbls := labels.Labels{{Name: labels.MetricName, Value: "foo"}} + var ( + req *cortexpb.WriteRequest + expectedResponseChunks *client.QueryStreamResponse + ) + switch enc { + case encoding.PrometheusXorChunk: + req, expectedResponseChunks = mockWriteRequest(t, lbls, 123000, 456) + case encoding.PrometheusHistogramChunk: + req, expectedResponseChunks = mockHistogramWriteRequest(t, lbls, 123000, 456, false) + case encoding.PrometheusFloatHistogramChunk: + req, expectedResponseChunks = mockHistogramWriteRequest(t, lbls, 123000, 456, true) + } + _, err = i.Push(ctx, req) + require.NoError(t, err) - listener, err := net.Listen("tcp", "localhost:0") - require.NoError(t, err) + // Create a GRPC server used to query back the data. + serv := grpc.NewServer(grpc.StreamInterceptor(middleware.StreamServerUserHeaderInterceptor)) + defer serv.GracefulStop() + client.RegisterIngesterServer(serv, i) - go func() { - require.NoError(t, serv.Serve(listener)) - }() + listener, err := net.Listen("tcp", "localhost:0") + require.NoError(t, err) - // Query back the series using GRPC streaming. - c, err := client.MakeIngesterClient(listener.Addr().String(), defaultClientTestConfig()) - require.NoError(t, err) - defer c.Close() + go func() { + require.NoError(t, serv.Serve(listener)) + }() - queryRequest := &client.QueryRequest{ - StartTimestampMs: 0, - EndTimestampMs: 200000, - Matchers: []*client.LabelMatcher{{ - Type: client.EQUAL, - Name: model.MetricNameLabel, - Value: "foo", - }}, - } + // Query back the series using GRPC streaming. 
+ c, err := client.MakeIngesterClient(listener.Addr().String(), defaultClientTestConfig()) + require.NoError(t, err) + defer c.Close() + + queryRequest := &client.QueryRequest{ + StartTimestampMs: 0, + EndTimestampMs: 200000, + Matchers: []*client.LabelMatcher{{ + Type: client.EQUAL, + Name: model.MetricNameLabel, + Value: "foo", + }}, + } - chunksTest := func(t *testing.T) { - s, err := c.QueryStream(ctx, queryRequest) - require.NoError(t, err) + chunksTest := func(t *testing.T) { + s, err := c.QueryStream(ctx, queryRequest) + require.NoError(t, err) - count := 0 - var lastResp *client.QueryStreamResponse - for { - resp, err := s.Recv() - if err == io.EOF { - break + count := 0 + var lastResp *client.QueryStreamResponse + for { + resp, err := s.Recv() + if err == io.EOF { + break + } + require.NoError(t, err) + count += len(resp.Chunkseries) + lastResp = resp + } + require.Equal(t, 1, count) + require.Equal(t, expectedResponseChunks, lastResp) } - require.NoError(t, err) - count += len(resp.Chunkseries) - lastResp = resp - } - require.Equal(t, 1, count) - require.Equal(t, expectedResponseChunks, lastResp) - } - t.Run("chunks", chunksTest) + t.Run("chunks", chunksTest) + }) + } } func TestIngester_QueryStreamManySamplesChunks(t *testing.T) { // Create ingester. cfg := defaultIngesterTestConfig(t) - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -2650,7 +3060,7 @@ func benchmarkQueryStream(b *testing.B) { cfg := defaultIngesterTestConfig(b) // Create ingester. - i, err := prepareIngesterWithBlocksStorage(b, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(b, cfg, prometheus.NewRegistry()) require.NoError(b, err) require.NoError(b, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -2735,11 +3145,66 @@ func mockWriteRequest(t *testing.T, lbls labels.Labels, value float64, timestamp return req, expectedQueryStreamResChunks } +func mockHistogramWriteRequest(t *testing.T, lbls labels.Labels, value int, timestampMs int64, float bool) (*cortexpb.WriteRequest, *client.QueryStreamResponse) { + var ( + histograms []cortexpb.Histogram + h *histogram.Histogram + fh *histogram.FloatHistogram + c chunkenc.Chunk + ) + if float { + fh = histogram_util.GenerateTestFloatHistogram(value) + histograms = []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(timestampMs, fh), + } + c = chunkenc.NewFloatHistogramChunk() + } else { + h = histogram_util.GenerateTestHistogram(value) + histograms = []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(timestampMs, h), + } + c = chunkenc.NewHistogramChunk() + } + + app, err := c.Appender() + require.NoError(t, err) + if float { + _, _, _, err = app.AppendFloatHistogram(nil, timestampMs, fh, true) + } else { + _, _, _, err = app.AppendHistogram(nil, timestampMs, h, true) + } + require.NoError(t, err) + c.Compact() + + req := cortexpb.ToWriteRequest([]labels.Labels{lbls}, nil, nil, histograms, cortexpb.API) + enc := int32(encoding.PrometheusHistogramChunk) + if float { + enc = int32(encoding.PrometheusFloatHistogramChunk) + } + expectedQueryStreamResChunks := &client.QueryStreamResponse{ + Chunkseries: []client.TimeSeriesChunk{ + { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls), + Chunks: 
[]client.Chunk{ + { + StartTimestampMs: timestampMs, + EndTimestampMs: timestampMs, + Encoding: enc, + Data: c.Bytes(), + }, + }, + }, + }, + } + + return req, expectedQueryStreamResChunks +} + func prepareIngesterWithBlocksStorage(t testing.TB, ingesterCfg Config, registerer prometheus.Registerer) (*Ingester, error) { - return prepareIngesterWithBlocksStorageAndLimits(t, ingesterCfg, defaultLimitsTestConfig(), nil, "", registerer) + return prepareIngesterWithBlocksStorageAndLimits(t, ingesterCfg, defaultLimitsTestConfig(), nil, "", registerer, true) } -func prepareIngesterWithBlocksStorageAndLimits(t testing.TB, ingesterCfg Config, limits validation.Limits, tenantLimits validation.TenantLimits, dataDir string, registerer prometheus.Registerer) (*Ingester, error) { +func prepareIngesterWithBlocksStorageAndLimits(t testing.TB, ingesterCfg Config, limits validation.Limits, tenantLimits validation.TenantLimits, dataDir string, registerer prometheus.Registerer, nativeHistograms bool) (*Ingester, error) { // Create a data dir if none has been provided. if dataDir == "" { dataDir = t.TempDir() @@ -2755,6 +3220,7 @@ func prepareIngesterWithBlocksStorageAndLimits(t testing.TB, ingesterCfg Config, ingesterCfg.BlocksStorageConfig.TSDB.Dir = dataDir ingesterCfg.BlocksStorageConfig.Bucket.Backend = "filesystem" ingesterCfg.BlocksStorageConfig.Bucket.Filesystem.Directory = bucketDir + ingesterCfg.BlocksStorageConfig.TSDB.EnableNativeHistograms = nativeHistograms ingester, err := New(ingesterCfg, overrides, registerer, log.NewNopLogger()) if err != nil { @@ -2897,7 +3363,7 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) { // setup the tsdbs dir testData.setup(t, tempDir) - ingester, err := New(ingesterCfg, overrides, nil, log.NewNopLogger()) + ingester, err := New(ingesterCfg, overrides, prometheus.NewRegistry(), log.NewNopLogger()) require.NoError(t, err) startErr := services.StartAndAwaitRunning(context.Background(), ingester) @@ -2937,7 +3403,7 @@ func TestIngester_shipBlocks(t *testing.T) { cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 2 // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -2978,7 +3444,7 @@ func TestIngester_dontShipBlocksWhenTenantDeletionMarkerIsPresent(t *testing.T) cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 2 // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) // Use in-memory bucket. @@ -3027,7 +3493,7 @@ func TestIngester_seriesCountIsCorrectAfterClosingTSDBForDeletedTenant(t *testin cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 2 // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) // Use in-memory bucket. 
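// Annotation (editor's note, not part of the patch): two recurring changes in
// the hunks around here are worth calling out. First,
// prepareIngesterWithBlocksStorageAndLimits gains a trailing nativeHistograms
// bool that is copied into BlocksStorageConfig.TSDB.EnableNativeHistograms, so
// each test opts in to native histogram ingestion explicitly. A minimal usage
// sketch, using only helpers defined in this diff:
//
//	i, err := prepareIngesterWithBlocksStorageAndLimits(
//		t, defaultIngesterTestConfig(t), defaultLimitsTestConfig(),
//		nil, "", prometheus.NewRegistry(), true) // true => enable native histograms
//	require.NoError(t, err)
//
// Second, call sites that previously passed a nil prometheus.Registerer now
// pass prometheus.NewRegistry(); a fresh registry per test presumably avoids
// duplicate-registration conflicts when a test constructs several ingesters.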
@@ -3071,7 +3537,7 @@ func TestIngester_sholdUpdateCacheShippedBlocks(t *testing.T) { cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 2 // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(ctx, i)) @@ -3110,7 +3576,7 @@ func TestIngester_closeAndDeleteUserTSDBIfIdle_shouldNotCloseTSDBIfShippingIsInP cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 2 // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(ctx, i)) @@ -3151,7 +3617,7 @@ func TestIngester_closingAndOpeningTsdbConcurrently(t *testing.T) { cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBTimeout = 0 // Will not run the loop, but will allow us to close any TSDB fast. // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(ctx, i)) @@ -3204,7 +3670,7 @@ func TestIngester_idleCloseEmptyTSDB(t *testing.T) { cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBTimeout = 0 // Will not run the loop, but will allow us to close any TSDB fast. // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(ctx, i)) @@ -3251,7 +3717,7 @@ func TestIngester_invalidSamplesDontChangeLastUpdateTime(t *testing.T) { cfg.LifecyclerConfig.JoinAfter = 0 // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -3584,7 +4050,7 @@ func Test_Ingester_UserStats(t *testing.T) { } // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), nil) + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -3632,7 +4098,7 @@ func Test_Ingester_AllUserStats(t *testing.T) { } // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), nil) + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -3960,7 +4426,7 @@ func TestHeadCompactionOnStartup(t *testing.T) { ingesterCfg.BlocksStorageConfig.Bucket.S3.Endpoint = "localhost" ingesterCfg.BlocksStorageConfig.TSDB.Retention = 2 * 24 * time.Hour // Make sure that no newly created blocks are deleted. 
- ingester, err := New(ingesterCfg, overrides, nil, log.NewNopLogger()) + ingester, err := New(ingesterCfg, overrides, prometheus.NewRegistry(), log.NewNopLogger()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ingester)) @@ -3981,7 +4447,7 @@ func TestIngester_CloseTSDBsOnShutdown(t *testing.T) { cfg.LifecyclerConfig.JoinAfter = 0 // Create ingester - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -4119,7 +4585,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { } func TestIngesterPushErrorDuringForcedCompaction(t *testing.T) { - i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), nil) + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -4316,7 +4782,7 @@ func TestIngester_PushInstanceLimits(t *testing.T) { return &testData.limits } - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -4415,7 +4881,7 @@ func TestIngester_inflightPushRequests(t *testing.T) { cfg.InstanceLimitsFn = func() *InstanceLimits { return &limits } cfg.LifecyclerConfig.JoinAfter = 0 - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -4431,29 +4897,8 @@ func TestIngester_inflightPushRequests(t *testing.T) { g, ctx := errgroup.WithContext(ctx) g.Go(func() error { - count := 100000 - target := time.Second - - // find right count to make sure that push takes given target duration. - for { - req := generateSamplesForLabel(labels.FromStrings(labels.MetricName, fmt.Sprintf("test-%d", count)), count) - - start := time.Now() - _, err := i.Push(ctx, req) - require.NoError(t, err) - - elapsed := time.Since(start) - t.Log(count, elapsed) - if elapsed > time.Second { - break - } - - count = int(float64(count) * float64(target/elapsed) * 1.5) // Adjust number of samples to hit our target push duration. - } - - // Now repeat push with number of samples calibrated to our target. + count := 3500000 req := generateSamplesForLabel(labels.FromStrings(labels.MetricName, fmt.Sprintf("real-%d", count)), count) - // Signal that we're going to do the real push now. 
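// Annotation (editor's note, not part of the patch): the hunk above replaces
// the self-calibrating loop, which kept growing the sample count until a
// single push took longer than one second, with a fixed count of 3,500,000
// samples. The "real" push below is then slow enough to overlap the concurrent
// pushes without per-machine calibration; the constant appears to be chosen
// empirically rather than derived.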
close(startCh) @@ -4488,7 +4933,7 @@ func TestIngester_MaxExemplarsFallBack(t *testing.T) { dir := t.TempDir() blocksDir := filepath.Join(dir, "blocks") limits := defaultLimitsTestConfig() - i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, nil) + i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, prometheus.NewRegistry(), true) require.NoError(t, err) maxExemplars := i.getMaxExemplars("someTenant") @@ -4496,7 +4941,7 @@ func TestIngester_MaxExemplarsFallBack(t *testing.T) { // set max exemplars value in limits, and re-initialize the ingester limits.MaxExemplars = 5 - i, err = prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, nil) + i, err = prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, blocksDir, prometheus.NewRegistry(), true) require.NoError(t, err) // validate this value is picked up now diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go index f636a975e0..9b572a8409 100644 --- a/pkg/ingester/limiter.go +++ b/pkg/ingester/limiter.go @@ -107,20 +107,20 @@ func (l *Limiter) AssertMaxMetricsWithMetadataPerUser(userID string, metrics int // AssertMaxSeriesPerLabelSet limit has not been reached compared to the current // number of metrics with metadata in input and returns an error if so. -func (l *Limiter) AssertMaxSeriesPerLabelSet(userID string, metric labels.Labels, f func(validation.MaxSeriesPerLabelSet) (int, error)) error { - m := l.maxSeriesPerLabelSet(userID, metric) +func (l *Limiter) AssertMaxSeriesPerLabelSet(userID string, metric labels.Labels, f func(validation.LimitsPerLabelSet) (int, error)) error { + m := l.limitsPerLabelSets(userID, metric) for _, limit := range m { - maxFunc := func(string) int { - return limit.Limit + maxSeriesFunc := func(string) int { + return limit.Limits.MaxSeries } - local := l.maxByLocalAndGlobal(userID, maxFunc, maxFunc) + local := l.maxByLocalAndGlobal(userID, maxSeriesFunc, maxSeriesFunc) if u, err := f(limit); err != nil { return err } else if u >= local { return errMaxSeriesPerLabelSetLimitExceeded{ id: limit.Id, localLimit: local, - globalLimit: limit.Limit, + globalLimit: limit.Limits.MaxSeries, } } } @@ -189,15 +189,15 @@ func (l *Limiter) formatMaxSeriesPerLabelSetError(err errMaxSeriesPerLabelSetLim minNonZero(err.globalLimit, err.localLimit), err.id, err.localLimit, err.globalLimit) } -func (l *Limiter) maxSeriesPerLabelSet(userID string, metric labels.Labels) []validation.MaxSeriesPerLabelSet { - m := l.limits.MaxSeriesPerLabelSet(userID) +func (l *Limiter) limitsPerLabelSets(userID string, metric labels.Labels) []validation.LimitsPerLabelSet { + m := l.limits.LimitsPerLabelSet(userID) // returning early to not have any overhead if len(m) == 0 { return nil } - r := make([]validation.MaxSeriesPerLabelSet, 0, len(m)) + r := make([]validation.LimitsPerLabelSet, 0, len(m)) outer: for _, lbls := range m { for _, lbl := range lbls.LabelSet { diff --git a/pkg/ingester/limiter_test.go b/pkg/ingester/limiter_test.go index 1b65e388ea..e4759191c1 100644 --- a/pkg/ingester/limiter_test.go +++ b/pkg/ingester/limiter_test.go @@ -2,9 +2,11 @@ package ingester import ( "errors" + "math" "testing" + "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -423,6 +425,90 @@ func TestLimiter_AssertMaxSeriesPerUser(t *testing.T) { } } +func TestLimiter_AssertMaxSeriesPerLabelSet(t *testing.T) { + + tests := map[string]struct { + limits 
validation.Limits + expected error + ringReplicationFactor int + ringIngesterCount int + shardByAllLabels bool + series int + }{ + "both local and global limit are disabled": { + ringReplicationFactor: 3, + ringIngesterCount: 10, + series: 200, + shardByAllLabels: true, + limits: validation.Limits{ + LimitsPerLabelSet: []validation.LimitsPerLabelSet{ + { + LabelSet: labels.FromMap(map[string]string{"foo": "bar"}), + Limits: validation.LimitsPerLabelSetEntry{ + MaxSeries: 0, + }, + }, + }, + }, + }, + "current number of series is above the limit": { + ringReplicationFactor: 3, + ringIngesterCount: 10, + series: 200, + shardByAllLabels: true, + expected: errMaxSeriesPerLabelSetLimitExceeded{globalLimit: 10, localLimit: 3}, + limits: validation.Limits{ + LimitsPerLabelSet: []validation.LimitsPerLabelSet{ + { + LabelSet: labels.FromMap(map[string]string{"foo": "bar"}), + Limits: validation.LimitsPerLabelSetEntry{ + MaxSeries: 10, + }, + }, + }, + }, + }, + "current number of series is below the limit and shard by all labels": { + ringReplicationFactor: 3, + ringIngesterCount: 10, + series: 2, + shardByAllLabels: true, + limits: validation.Limits{ + LimitsPerLabelSet: []validation.LimitsPerLabelSet{ + { + LabelSet: labels.FromMap(map[string]string{"foo": "bar"}), + Limits: validation.LimitsPerLabelSetEntry{ + MaxSeries: 10, + }, + }, + }, + }, + }, + } + + for testName, testData := range tests { + testData := testData + + t.Run(testName, func(t *testing.T) { + // Mock the ring + ring := &ringCountMock{} + ring.On("HealthyInstancesCount").Return(testData.ringIngesterCount) + ring.On("ZonesCount").Return(1) + + // Mock limits + limits, err := validation.NewOverrides(testData.limits, nil) + require.NoError(t, err) + + limiter := NewLimiter(limits, ring, util.ShardingStrategyDefault, testData.shardByAllLabels, testData.ringReplicationFactor, false, "") + actual := limiter.AssertMaxSeriesPerLabelSet("test", labels.FromStrings("foo", "bar"), func(set validation.LimitsPerLabelSet) (int, error) { + return testData.series, nil + }) + + assert.Equal(t, testData.expected, actual) + }) + } +} + func TestLimiter_AssertMaxMetricsWithMetadataPerUser(t *testing.T) { tests := map[string]struct { maxLocalMetadataPerUser int diff --git a/pkg/ingester/metrics.go b/pkg/ingester/metrics.go index 0cd258c9ae..422cd8d3b8 100644 --- a/pkg/ingester/metrics.go +++ b/pkg/ingester/metrics.go @@ -17,6 +17,11 @@ const ( memSeriesRemovedTotalHelp = "The total number of series that were removed per user." 
) +const ( + sampleMetricTypeFloat = "float" + sampleMetricTypeHistogram = "histogram" +) + type ingesterMetrics struct { ingestedSamples prometheus.Counter ingestedExemplars prometheus.Counter @@ -37,8 +42,9 @@ type ingesterMetrics struct { memSeriesRemovedTotal *prometheus.CounterVec memMetadataRemovedTotal *prometheus.CounterVec - activeSeriesPerUser *prometheus.GaugeVec - activeSeriesPerLabelSet *prometheus.GaugeVec + activeSeriesPerUser *prometheus.GaugeVec + limitsPerLabelSet *prometheus.GaugeVec + usagePerLabelSet *prometheus.GaugeVec // Global limit metrics maxUsersGauge prometheus.GaugeFunc @@ -212,10 +218,15 @@ func newIngesterMetrics(r prometheus.Registerer, return 0 }), - activeSeriesPerLabelSet: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{ - Name: "cortex_ingester_active_series_per_labelset", - Help: "Number of currently active series per user and labelset.", - }, []string{"user", "labelset"}), + limitsPerLabelSet: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_ingester_limits_per_labelset", + Help: "Limits per user and labelset.", + }, []string{"user", "limit", "labelset"}), + + usagePerLabelSet: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_ingester_usage_per_labelset", + Help: "Current usage per user and labelset.", + }, []string{"user", "limit", "labelset"}), // Not registered automatically, but only if activeSeriesEnabled is true. activeSeriesPerUser: prometheus.NewGaugeVec(prometheus.GaugeOpts{ @@ -287,11 +298,14 @@ type tsdbMetrics struct { tsdbMmapChunkCorruptionTotal *prometheus.Desc tsdbChunkwriteQueueOperationsTotal *prometheus.Desc tsdbSamplesAppended *prometheus.Desc - tsdbOutOfOrderSamplesAppended *prometheus.Desc - tsdbSnapshotReplayErrorTotal *prometheus.Desc - tsdbOOOHistogram *prometheus.Desc - tsdbMmapChunksTotal *prometheus.Desc - tsdbDataTotalReplayDuration *prometheus.Desc + // Although there is an existing sample-out-of-order discarded samples metric, some samples can still + // be dropped silently due to OOO at commit phase, and it doesn't increment the discarded samples metric. 
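// Annotation (editor's note, not part of the patch): the two out-of-order
// counters tracked below measure different things, per their help strings in
// this diff: cortex_ingester_tsdb_head_out_of_order_samples_appended_total
// counts OOO samples that were successfully appended (an OOO time window is
// enabled), while the new cortex_ingester_tsdb_out_of_order_samples_total
// counts ingestion attempts rejected because out-of-order support is disabled.
// Both are now broken out by sample type ("float" vs "histogram").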
+ tsdbOOOSamples *prometheus.Desc + tsdbOutOfOrderSamplesAppended *prometheus.Desc + tsdbSnapshotReplayErrorTotal *prometheus.Desc + tsdbOOOHistogram *prometheus.Desc + tsdbMmapChunksTotal *prometheus.Desc + tsdbDataTotalReplayDuration *prometheus.Desc tsdbExemplarsTotal *prometheus.Desc tsdbExemplarsInStorage *prometheus.Desc @@ -467,10 +481,14 @@ func newTSDBMetrics(r prometheus.Registerer) *tsdbMetrics { "cortex_ingester_tsdb_head_samples_appended_total", "Total number of appended samples.", []string{"user", "type"}, nil), + tsdbOOOSamples: prometheus.NewDesc( + "cortex_ingester_tsdb_out_of_order_samples_total", + "Total number of out of order samples ingestion failed attempts due to out of order being disabled.", + []string{"user", "type"}, nil), tsdbOutOfOrderSamplesAppended: prometheus.NewDesc( "cortex_ingester_tsdb_head_out_of_order_samples_appended_total", "Total number of appended out of order samples.", - []string{"user"}, nil), + []string{"user", "type"}, nil), tsdbSnapshotReplayErrorTotal: prometheus.NewDesc( "cortex_ingester_tsdb_snapshot_replay_error_total", "Total number snapshot replays that failed.", @@ -553,6 +571,7 @@ func (sm *tsdbMetrics) Describe(out chan<- *prometheus.Desc) { out <- sm.tsdbTimeRetentionCount out <- sm.tsdbBlocksBytes out <- sm.tsdbSamplesAppended + out <- sm.tsdbOOOSamples out <- sm.tsdbOutOfOrderSamplesAppended out <- sm.tsdbSnapshotReplayErrorTotal out <- sm.tsdbOOOHistogram @@ -609,7 +628,8 @@ func (sm *tsdbMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfCounters(out, sm.tsdbTimeRetentionCount, "prometheus_tsdb_time_retentions_total") data.SendSumOfGaugesPerUser(out, sm.tsdbBlocksBytes, "prometheus_tsdb_storage_blocks_bytes") data.SendSumOfCountersPerUserWithLabels(out, sm.tsdbSamplesAppended, "prometheus_tsdb_head_samples_appended_total", "type") - data.SendSumOfCountersPerUser(out, sm.tsdbOutOfOrderSamplesAppended, "prometheus_tsdb_head_out_of_order_samples_appended_total") + data.SendSumOfCountersPerUserWithLabels(out, sm.tsdbOOOSamples, "prometheus_tsdb_out_of_order_samples_total", "type") + data.SendSumOfCountersPerUserWithLabels(out, sm.tsdbOutOfOrderSamplesAppended, "prometheus_tsdb_head_out_of_order_samples_appended_total", "type") data.SendSumOfCounters(out, sm.tsdbSnapshotReplayErrorTotal, "prometheus_tsdb_snapshot_replay_error_total") data.SendSumOfHistograms(out, sm.tsdbOOOHistogram, "prometheus_tsdb_sample_ooo_delta") data.SendSumOfGauges(out, sm.tsdbMmapChunksTotal, "prometheus_tsdb_mmap_chunks_total") diff --git a/pkg/ingester/metrics_test.go b/pkg/ingester/metrics_test.go index b0f40a5c52..f5c5e2851f 100644 --- a/pkg/ingester/metrics_test.go +++ b/pkg/ingester/metrics_test.go @@ -247,14 +247,20 @@ func TestTSDBMetrics(t *testing.T) { cortex_ingester_tsdb_head_gc_duration_seconds_count 3 # HELP cortex_ingester_tsdb_head_out_of_order_samples_appended_total Total number of appended out of order samples. 
# TYPE cortex_ingester_tsdb_head_out_of_order_samples_appended_total counter - cortex_ingester_tsdb_head_out_of_order_samples_appended_total{user="user1"} 102 - cortex_ingester_tsdb_head_out_of_order_samples_appended_total{user="user2"} 102 - cortex_ingester_tsdb_head_out_of_order_samples_appended_total{user="user3"} 102 + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="float",user="user1"} 102 + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="float",user="user2"} 102 + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="float",user="user3"} 102 + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="histogram",user="user1"} 103 + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="histogram",user="user2"} 103 + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="histogram",user="user3"} 103 # HELP cortex_ingester_tsdb_head_samples_appended_total Total number of appended samples. # TYPE cortex_ingester_tsdb_head_samples_appended_total counter cortex_ingester_tsdb_head_samples_appended_total{type="float",user="user1"} 101 cortex_ingester_tsdb_head_samples_appended_total{type="float",user="user2"} 101 cortex_ingester_tsdb_head_samples_appended_total{type="float",user="user3"} 101 + cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="user1"} 102 + cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="user2"} 102 + cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="user3"} 102 # HELP cortex_ingester_tsdb_checkpoint_deletions_failed_total Total number of TSDB checkpoint deletions that failed. # TYPE cortex_ingester_tsdb_checkpoint_deletions_failed_total counter cortex_ingester_tsdb_checkpoint_deletions_failed_total 1586096 @@ -320,6 +326,14 @@ func TestTSDBMetrics(t *testing.T) { # HELP cortex_ingester_tsdb_mmap_chunks_total Total number of chunks that were memory-mapped. # TYPE cortex_ingester_tsdb_mmap_chunks_total gauge cortex_ingester_tsdb_mmap_chunks_total 0 + # HELP cortex_ingester_tsdb_out_of_order_samples_total Total number of out of order samples ingestion failed attempts due to out of order being disabled. + # TYPE cortex_ingester_tsdb_out_of_order_samples_total counter + cortex_ingester_tsdb_out_of_order_samples_total{type="float",user="user1"} 102 + cortex_ingester_tsdb_out_of_order_samples_total{type="float",user="user2"} 102 + cortex_ingester_tsdb_out_of_order_samples_total{type="float",user="user3"} 102 + cortex_ingester_tsdb_out_of_order_samples_total{type="histogram",user="user1"} 103 + cortex_ingester_tsdb_out_of_order_samples_total{type="histogram",user="user2"} 103 + cortex_ingester_tsdb_out_of_order_samples_total{type="histogram",user="user3"} 103 # HELP cortex_ingester_tsdb_blocks_loaded Number of currently loaded data blocks # TYPE cortex_ingester_tsdb_blocks_loaded gauge cortex_ingester_tsdb_blocks_loaded 15 @@ -492,12 +506,16 @@ func TestTSDBMetricsWithRemoval(t *testing.T) { cortex_ingester_tsdb_head_gc_duration_seconds_count 3 # HELP cortex_ingester_tsdb_head_out_of_order_samples_appended_total Total number of appended out of order samples. 
# TYPE cortex_ingester_tsdb_head_out_of_order_samples_appended_total counter - cortex_ingester_tsdb_head_out_of_order_samples_appended_total{user="user1"} 102 - cortex_ingester_tsdb_head_out_of_order_samples_appended_total{user="user2"} 102 + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="float",user="user1"} 102 + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="float",user="user2"} 102 + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="histogram",user="user1"} 103 + cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="histogram",user="user2"} 103 # HELP cortex_ingester_tsdb_head_samples_appended_total Total number of appended samples. # TYPE cortex_ingester_tsdb_head_samples_appended_total counter cortex_ingester_tsdb_head_samples_appended_total{type="float",user="user1"} 101 cortex_ingester_tsdb_head_samples_appended_total{type="float",user="user2"} 101 + cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="user1"} 102 + cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="user2"} 102 # HELP cortex_ingester_tsdb_checkpoint_deletions_failed_total Total number of TSDB checkpoint deletions that failed. # TYPE cortex_ingester_tsdb_checkpoint_deletions_failed_total counter @@ -560,6 +578,12 @@ func TestTSDBMetricsWithRemoval(t *testing.T) { # HELP cortex_ingester_tsdb_mmap_chunks_total Total number of chunks that were memory-mapped. # TYPE cortex_ingester_tsdb_mmap_chunks_total gauge cortex_ingester_tsdb_mmap_chunks_total 0 + # HELP cortex_ingester_tsdb_out_of_order_samples_total Total number of out of order samples ingestion failed attempts due to out of order being disabled. + # TYPE cortex_ingester_tsdb_out_of_order_samples_total counter + cortex_ingester_tsdb_out_of_order_samples_total{type="float",user="user1"} 102 + cortex_ingester_tsdb_out_of_order_samples_total{type="float",user="user2"} 102 + cortex_ingester_tsdb_out_of_order_samples_total{type="histogram",user="user1"} 103 + cortex_ingester_tsdb_out_of_order_samples_total{type="histogram",user="user2"} 103 # HELP cortex_ingester_tsdb_blocks_loaded Number of currently loaded data blocks # TYPE cortex_ingester_tsdb_blocks_loaded gauge cortex_ingester_tsdb_blocks_loaded 10 @@ -813,13 +837,22 @@ func populateTSDBMetrics(base float64) *prometheus.Registry { Name: "prometheus_tsdb_head_samples_appended_total", Help: "Total number of appended samples.", }, []string{"type"}) - samplesAppended.WithLabelValues("float").Add(101) + samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(101) + samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(102) - outOfOrderSamplesAppended := promauto.With(r).NewCounter(prometheus.CounterOpts{ + outOfOrderSamplesAppended := promauto.With(r).NewCounterVec(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_out_of_order_samples_appended_total", Help: "Total number of appended out of order samples.", - }) - outOfOrderSamplesAppended.Add(102) + }, []string{"type"}) + outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(102) + outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(103) + + oooSamples := promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "prometheus_tsdb_out_of_order_samples_total", + Help: "Total number of out of order samples ingestion failed attempts due to out of order being disabled.", + }, []string{"type"}) + oooSamples.WithLabelValues(sampleMetricTypeFloat).Add(102) + 
oooSamples.WithLabelValues(sampleMetricTypeHistogram).Add(103) snapshotReplayErrorTotal := promauto.With(r).NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_snapshot_replay_error_total", diff --git a/pkg/ingester/user_metrics_metadata.go b/pkg/ingester/user_metrics_metadata.go index 8c4156f3b6..96adfaa9ba 100644 --- a/pkg/ingester/user_metrics_metadata.go +++ b/pkg/ingester/user_metrics_metadata.go @@ -14,19 +14,21 @@ import ( // Metadata is kept as a set as it can come from multiple targets that Prometheus scrapes // with the same metric name. type userMetricsMetadata struct { - limiter *Limiter - metrics *ingesterMetrics - userID string + limiter *Limiter + metrics *ingesterMetrics + validateMetrics *validation.ValidateMetrics + userID string mtx sync.RWMutex metricToMetadata map[string]metricMetadataSet } -func newMetadataMap(l *Limiter, m *ingesterMetrics, userID string) *userMetricsMetadata { +func newMetadataMap(l *Limiter, m *ingesterMetrics, v *validation.ValidateMetrics, userID string) *userMetricsMetadata { return &userMetricsMetadata{ metricToMetadata: map[string]metricMetadataSet{}, limiter: l, metrics: m, + validateMetrics: v, userID: userID, } } @@ -42,7 +44,7 @@ func (mm *userMetricsMetadata) add(metric string, metadata *cortexpb.MetricMetad if !ok { // Verify that the user can create more metric metadata given we don't have a set for that metric name. if err := mm.limiter.AssertMaxMetricsWithMetadataPerUser(mm.userID, len(mm.metricToMetadata)); err != nil { - validation.DiscardedMetadata.WithLabelValues(mm.userID, perUserMetadataLimit).Inc() + mm.validateMetrics.DiscardedMetadata.WithLabelValues(mm.userID, perUserMetadataLimit).Inc() return makeLimitError(perUserMetadataLimit, mm.limiter.FormatError(mm.userID, err)) } set = metricMetadataSet{} @@ -50,7 +52,7 @@ func (mm *userMetricsMetadata) add(metric string, metadata *cortexpb.MetricMetad } if err := mm.limiter.AssertMaxMetadataPerMetric(mm.userID, len(set)); err != nil { - validation.DiscardedMetadata.WithLabelValues(mm.userID, perMetricMetadataLimit).Inc() + mm.validateMetrics.DiscardedMetadata.WithLabelValues(mm.userID, perMetricMetadataLimit).Inc() return makeMetricLimitError(perMetricMetadataLimit, labels.FromStrings(labels.MetricName, metric), mm.limiter.FormatError(mm.userID, err)) } diff --git a/pkg/ingester/user_state.go b/pkg/ingester/user_state.go index dc9bfe54b1..60ca5c1eb6 100644 --- a/pkg/ingester/user_state.go +++ b/pkg/ingester/user_state.go @@ -4,7 +4,6 @@ import ( "context" "sync" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/index" @@ -115,7 +114,7 @@ func newLabelSetCounter(limiter *Limiter) *labelSetCounter { } func (m *labelSetCounter) canAddSeriesForLabelSet(ctx context.Context, u *userTSDB, metric labels.Labels) error { - return m.limiter.AssertMaxSeriesPerLabelSet(u.userID, metric, func(set validation.MaxSeriesPerLabelSet) (int, error) { + return m.limiter.AssertMaxSeriesPerLabelSet(u.userID, metric, func(set validation.LimitsPerLabelSet) (int, error) { s := m.shards[util.HashFP(model.Fingerprint(set.Hash))%numMetricCounterShards] s.RLock() if r, ok := s.valuesCounter[set.Hash]; ok { @@ -129,7 +128,7 @@ func (m *labelSetCounter) canAddSeriesForLabelSet(ctx context.Context, u *userTS }) } -func (m *labelSetCounter) backFillLimit(ctx context.Context, u *userTSDB, limit validation.MaxSeriesPerLabelSet, s *labelSetCounterShard) (int, error) { +func (m 
*labelSetCounter) backFillLimit(ctx context.Context, u *userTSDB, limit validation.LimitsPerLabelSet, s *labelSetCounterShard) (int, error) { ir, err := u.db.Head().Index() if err != nil { return 0, err @@ -171,7 +170,7 @@ func (m *labelSetCounter) backFillLimit(ctx context.Context, u *userTSDB, limit } func (m *labelSetCounter) increaseSeriesLabelSet(u *userTSDB, metric labels.Labels) { - limits := m.limiter.maxSeriesPerLabelSet(u.userID, metric) + limits := m.limiter.limitsPerLabelSets(u.userID, metric) for _, l := range limits { s := m.shards[util.HashFP(model.Fingerprint(l.Hash))%numMetricCounterShards] s.Lock() @@ -188,7 +187,7 @@ func (m *labelSetCounter) increaseSeriesLabelSet(u *userTSDB, metric labels.Labe } func (m *labelSetCounter) decreaseSeriesLabelSet(u *userTSDB, metric labels.Labels) { - limits := m.limiter.maxSeriesPerLabelSet(u.userID, metric) + limits := m.limiter.limitsPerLabelSets(u.userID, metric) for _, l := range limits { s := m.shards[util.HashFP(model.Fingerprint(l.Hash))%numMetricCounterShards] s.Lock() @@ -199,9 +198,9 @@ func (m *labelSetCounter) decreaseSeriesLabelSet(u *userTSDB, metric labels.Labe } } -func (m *labelSetCounter) UpdateMetric(ctx context.Context, u *userTSDB, vec *prometheus.GaugeVec) error { - currentLbsLimitHash := map[uint64]validation.MaxSeriesPerLabelSet{} - for _, l := range m.limiter.limits.MaxSeriesPerLabelSet(u.userID) { +func (m *labelSetCounter) UpdateMetric(ctx context.Context, u *userTSDB, metrics *ingesterMetrics) error { + currentLbsLimitHash := map[uint64]validation.LimitsPerLabelSet{} + for _, l := range m.limiter.limits.LimitsPerLabelSet(u.userID) { currentLbsLimitHash[l.Hash] = l } @@ -209,13 +208,16 @@ func (m *labelSetCounter) UpdateMetric(ctx context.Context, u *userTSDB, vec *pr s := m.shards[i] s.RLock() for h, entry := range s.valuesCounter { + lbls := entry.labels.String() // This limit no longer exists if _, ok := currentLbsLimitHash[h]; !ok { - vec.DeleteLabelValues(u.userID, entry.labels.String()) + metrics.usagePerLabelSet.DeleteLabelValues(u.userID, "max_series", lbls) + metrics.limitsPerLabelSet.DeleteLabelValues(u.userID, "max_series", lbls) continue } + metrics.usagePerLabelSet.WithLabelValues(u.userID, "max_series", lbls).Set(float64(entry.count)) + metrics.limitsPerLabelSet.WithLabelValues(u.userID, "max_series", lbls).Set(float64(currentLbsLimitHash[h].Limits.MaxSeries)) delete(currentLbsLimitHash, h) - vec.WithLabelValues(u.userID, entry.labels.String()).Set(float64(entry.count)) } s.RUnlock() } @@ -227,7 +229,9 @@ func (m *labelSetCounter) UpdateMetric(ctx context.Context, u *userTSDB, vec *pr if err != nil { return err } - vec.WithLabelValues(u.userID, l.LabelSet.String()).Set(float64(count)) + lbls := l.LabelSet.String() + metrics.usagePerLabelSet.WithLabelValues(u.userID, "max_series", lbls).Set(float64(count)) + metrics.limitsPerLabelSet.WithLabelValues(u.userID, "max_series", lbls).Set(float64(l.Limits.MaxSeries)) } return nil diff --git a/pkg/querier/batch/batch_test.go b/pkg/querier/batch/batch_test.go index 801e28008d..ffd2c248b2 100644 --- a/pkg/querier/batch/batch_test.go +++ b/pkg/querier/batch/batch_test.go @@ -11,6 +11,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" + "github.com/cortexproject/cortex/pkg/util" histogram_util "github.com/cortexproject/cortex/pkg/util/histogram" ) @@ -127,8 +128,8 @@ func TestSeekCorrectlyDealWithSinglePointChunks(t *testing.T) { promchunk.PrometheusFloatHistogramChunk, } { valType := 
enc.ChunkValueType() - chunkOne := mkChunk(t, step, model.Time(1*step/time.Millisecond), 1, enc) - chunkTwo := mkChunk(t, step, model.Time(10*step/time.Millisecond), 1, enc) + chunkOne := util.GenerateChunk(t, step, model.Time(1*step/time.Millisecond), 1, enc) + chunkTwo := util.GenerateChunk(t, step, model.Time(10*step/time.Millisecond), 1, enc) chunks := []chunk.Chunk{chunkOne, chunkTwo} sut := NewChunkMergeIterator(chunks, 0, 0) @@ -140,7 +141,7 @@ func TestSeekCorrectlyDealWithSinglePointChunks(t *testing.T) { switch enc { case promchunk.PrometheusXorChunk: actual, val := sut.At() - require.Equal(t, float64(1*time.Second/time.Millisecond), val) // since mkChunk use ts as value. + require.Equal(t, float64(1*time.Second/time.Millisecond), val) // since util.GenerateChunk uses ts as value. require.Equal(t, int64(1*time.Second/time.Millisecond), actual) case promchunk.PrometheusHistogramChunk: actual, val := sut.AtHistogram(nil) @@ -160,7 +161,7 @@ func createChunks(b *testing.B, step time.Duration, numChunks, numSamplesPerChun for d := 0; d < duplicationFactor; d++ { for c := 0; c < numChunks; c++ { minTime := step * time.Duration(c*numSamplesPerChunk) - result = append(result, mkChunk(b, step, model.Time(minTime.Milliseconds()), numSamplesPerChunk, enc)) + result = append(result, util.GenerateChunk(b, step, model.Time(minTime.Milliseconds()), numSamplesPerChunk, enc)) } } diff --git a/pkg/querier/batch/chunk_test.go b/pkg/querier/batch/chunk_test.go index b8327a70e9..2eb7ad5194 100644 --- a/pkg/querier/batch/chunk_test.go +++ b/pkg/querier/batch/chunk_test.go @@ -6,12 +6,12 @@ import ( "time" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/chunk" promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" + "github.com/cortexproject/cortex/pkg/util" histogram_util "github.com/cortexproject/cortex/pkg/util/histogram" ) @@ -47,45 +47,8 @@ func forEncodings(t *testing.T, f func(t *testing.T, enc promchunk.Encoding)) { } } -func mkChunk(t require.TestingT, step time.Duration, from model.Time, points int, enc promchunk.Encoding) chunk.Chunk { - metric := labels.Labels{ - {Name: model.MetricNameLabel, Value: "foo"}, - } - pe := enc.PromChunkEncoding() - pc, err := chunkenc.NewEmptyChunk(pe) - require.NoError(t, err) - appender, err := pc.Appender() - require.NoError(t, err) - ts := from - - switch pe { - case chunkenc.EncXOR: - for i := 0; i < points; i++ { - appender.Append(int64(ts), float64(ts)) - ts = ts.Add(step) - } - case chunkenc.EncHistogram: - histograms := histogram_util.GenerateTestHistograms(int(from), int(step/time.Millisecond), points, 5, 20) - for i := 0; i < points; i++ { - _, _, appender, err = appender.AppendHistogram(nil, int64(ts), histograms[i], true) - require.NoError(t, err) - ts = ts.Add(step) - } - case chunkenc.EncFloatHistogram: - histograms := histogram_util.GenerateTestHistograms(int(from), int(step/time.Millisecond), points, 5, 20) - for i := 0; i < points; i++ { - _, _, appender, err = appender.AppendFloatHistogram(nil, int64(ts), histograms[i].ToFloat(nil), true) - require.NoError(t, err) - ts = ts.Add(step) - } - } - - ts = ts.Add(-step) // undo the add that we did just before exiting the loop - return chunk.NewChunk(metric, pc, from, ts) -} - func mkGenericChunk(t require.TestingT, from model.Time, points int, enc promchunk.Encoding) GenericChunk { - ck 
:= util.GenerateChunk(t, step, from, points, enc) return NewGenericChunk(int64(ck.From), int64(ck.Through), ck.NewIterator) } diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 7c56649a05..3340e9ecb0 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -2,6 +2,7 @@ package querier import ( "context" + "encoding/binary" "fmt" "io" "sort" @@ -1162,7 +1163,10 @@ func countSamplesAndChunks(series ...*storepb.Series) (samplesCount, chunksCount chunksCount += uint64(len(s.Chunks)) for _, c := range s.Chunks { if c.Raw != nil { - samplesCount += uint64(c.Raw.XORNumSamples()) + switch c.Raw.Type { + case storepb.Chunk_XOR, storepb.Chunk_HISTOGRAM, storepb.Chunk_FLOAT_HISTOGRAM: + samplesCount += uint64(binary.BigEndian.Uint16(c.Raw.Data)) + } } } } diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 40bde1f4cf..3ff14fbca1 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -3,6 +3,7 @@ package querier import ( "context" "fmt" + "io" "sort" "strings" @@ -15,6 +16,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" @@ -33,6 +35,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/cortexproject/cortex/pkg/chunk/encoding" "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" "github.com/cortexproject/cortex/pkg/storegateway" @@ -1824,13 +1827,9 @@ func valuesFromSeries(name string, series ...labels.Labels) []string { } func TestCountSamplesAndChunks(t *testing.T) { - c := chunkenc.NewXORChunk() - appender, err := c.Appender() - require.NoError(t, err) - samples := 300 - for i := 0; i < samples; i++ { - appender.Append(int64(i), float64(i)) - } + floatChk := util.GenerateChunk(t, time.Second, model.Time(0), 100, encoding.PrometheusXorChunk) + histogramChk := util.GenerateChunk(t, time.Second, model.Time(0), 300, encoding.PrometheusHistogramChunk) + floatHistogramChk := util.GenerateChunk(t, time.Second, model.Time(0), 500, encoding.PrometheusFloatHistogramChunk) for i, tc := range []struct { serieses []*storepb.Series @@ -1844,13 +1843,13 @@ func TestCountSamplesAndChunks(t *testing.T) { { Raw: &storepb.Chunk{ Type: storepb.Chunk_XOR, - Data: c.Bytes(), + Data: floatChk.Data.Bytes(), }, }, }, }, }, - expectedSamples: uint64(samples), + expectedSamples: uint64(100), expectedChunks: 1, }, { @@ -1860,21 +1859,49 @@ func TestCountSamplesAndChunks(t *testing.T) { { Raw: &storepb.Chunk{ Type: storepb.Chunk_XOR, - Data: c.Bytes(), + Data: floatChk.Data.Bytes(), }, }, { Raw: &storepb.Chunk{ - Type: storepb.Chunk_XOR, - Data: c.Bytes(), + Type: storepb.Chunk_HISTOGRAM, + Data: histogramChk.Data.Bytes(), }, }, }, }, }, - expectedSamples: uint64(int64(samples) * 2), + expectedSamples: 400, expectedChunks: 2, }, + { + serieses: []*storepb.Series{ + { + Chunks: []storepb.AggrChunk{ + { + Raw: &storepb.Chunk{ + Type: storepb.Chunk_XOR, + Data: floatChk.Data.Bytes(), + }, + }, + { + Raw: &storepb.Chunk{ + Type: storepb.Chunk_HISTOGRAM, + Data: histogramChk.Data.Bytes(), + }, + }, + { + Raw: &storepb.Chunk{ + Type: storepb.Chunk_FLOAT_HISTOGRAM, + Data: 
floatHistogramChk.Data.Bytes(), + }, + }, + }, + }, + }, + expectedSamples: 900, + expectedChunks: 3, + }, } { t.Run(fmt.Sprintf("test_case_%d", i), func(t *testing.T) { samples, chunks := countSamplesAndChunks(tc.serieses...) diff --git a/pkg/querier/chunk_store_queryable_test.go b/pkg/querier/chunk_store_queryable_test.go index 5a915f507b..2d32769a52 100644 --- a/pkg/querier/chunk_store_queryable_test.go +++ b/pkg/querier/chunk_store_queryable_test.go @@ -1,14 +1,12 @@ package querier import ( - "time" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/chunk/encoding" + "github.com/cortexproject/cortex/pkg/util" ) type mockChunkStore struct { @@ -19,28 +17,15 @@ func (m mockChunkStore) Get() ([]chunk.Chunk, error) { return m.chunks, nil } -func makeMockChunkStore(t require.TestingT, numChunks int) (mockChunkStore, model.Time) { +func makeMockChunkStore(t require.TestingT, numChunks int, enc encoding.Encoding) (mockChunkStore, model.Time) { var ( chunks = make([]chunk.Chunk, 0, numChunks) from = model.Time(0) ) for i := 0; i < numChunks; i++ { - c := mkChunk(t, from, from.Add(samplesPerChunk*sampleRate), sampleRate) + c := util.GenerateChunk(t, sampleRate, from, int(samplesPerChunk), enc) chunks = append(chunks, c) from = from.Add(chunkOffset) } return mockChunkStore{chunks}, from } - -func mkChunk(t require.TestingT, mint, maxt model.Time, step time.Duration) chunk.Chunk { - metric := labels.Labels{ - {Name: model.MetricNameLabel, Value: "foo"}, - } - pc := chunkenc.NewXORChunk() - appender, err := pc.Appender() - require.NoError(t, err) - for i := mint; i.Before(maxt); i = i.Add(step) { - appender.Append(int64(i), float64(i)) - } - return chunk.NewChunk(metric, pc, mint, maxt) -} diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 8b4887714e..55d5ec0201 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -77,11 +77,10 @@ type query struct { } var ( - encodings = []struct { - name string - e promchunk.Encoding - }{ - {"PrometheusXorChunk", promchunk.PrometheusXorChunk}, + encodings = []promchunk.Encoding{ + promchunk.PrometheusXorChunk, + promchunk.PrometheusHistogramChunk, + promchunk.PrometheusFloatHistogramChunk, } queries = []query{ @@ -177,14 +176,14 @@ func TestShouldSortSeriesIfQueryingMultipleQueryables(t *testing.T) { {Name: model.MetricNameLabel, Value: "foo"}, {Name: "order", Value: "2"}, }, - Chunks: ConvertToChunks(t, samples), + Chunks: ConvertToChunks(t, samples, nil), }, { Labels: []cortexpb.LabelAdapter{ {Name: model.MetricNameLabel, Value: "foo"}, {Name: "order", Value: "1"}, }, - Chunks: ConvertToChunks(t, samples), + Chunks: ConvertToChunks(t, samples, nil), }, }, } @@ -326,35 +325,35 @@ func TestLimits(t *testing.T) { {Name: model.MetricNameLabel, Value: "foo"}, {Name: "order", Value: "2"}, }, - Chunks: ConvertToChunks(t, samples), + Chunks: ConvertToChunks(t, samples, nil), }, { Labels: []cortexpb.LabelAdapter{ {Name: model.MetricNameLabel, Value: "foo"}, {Name: "order", Value: "1"}, }, - Chunks: ConvertToChunks(t, samples), + Chunks: ConvertToChunks(t, samples, nil), }, { Labels: []cortexpb.LabelAdapter{ {Name: model.MetricNameLabel, Value: "foo"}, {Name: "orders", Value: "3"}, }, - Chunks: ConvertToChunks(t, samples), + Chunks: ConvertToChunks(t, samples, nil), }, { Labels: []cortexpb.LabelAdapter{ {Name: 
model.MetricNameLabel, Value: "bar"}, {Name: "orders", Value: "2"}, }, - Chunks: ConvertToChunks(t, samples), + Chunks: ConvertToChunks(t, samples, nil), }, { Labels: []cortexpb.LabelAdapter{ {Name: model.MetricNameLabel, Value: "bar"}, {Name: "orders", Value: "1"}, }, - Chunks: ConvertToChunks(t, samples), + Chunks: ConvertToChunks(t, samples, nil), }, }, } @@ -481,31 +480,29 @@ func TestQuerier(t *testing.T) { } for _, thanosEngine := range []bool{false, true} { for _, query := range queries { - for _, encoding := range encodings { - t.Run(fmt.Sprintf("%s/%s", query.query, encoding.name), func(t *testing.T) { - var queryEngine promql.QueryEngine - if thanosEngine { - queryEngine = engine.New(engine.Opts{ - EngineOpts: opts, - LogicalOptimizers: logicalplan.AllOptimizers, - }) - } else { - queryEngine = promql.NewEngine(opts) - } - // Disable active query tracker to avoid mmap error. - cfg.ActiveQueryTrackerDir = "" + t.Run(fmt.Sprintf("thanosEngine=%s,query=%s", strconv.FormatBool(thanosEngine), query.query), func(t *testing.T) { + var queryEngine promql.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } + // Disable active query tracker to avoid mmap error. + cfg.ActiveQueryTrackerDir = "" - chunkStore, through := makeMockChunkStore(t, chunks) - distributor := mockDistibutorFor(t, chunkStore.chunks) + chunkStore, through := makeMockChunkStore(t, chunks, promchunk.PrometheusXorChunk) + distributor := mockDistibutorFor(t, chunkStore.chunks) - overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) - require.NoError(t, err) + overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) + require.NoError(t, err) - queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore)), UseAlwaysQueryable(db)} - queryable, _, _ := New(cfg, overrides, distributor, queryables, nil, log.NewNopLogger()) - testRangeQuery(t, queryable, queryEngine, through, query) - }) - } + queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore)), UseAlwaysQueryable(db)} + queryable, _, _ := New(cfg, overrides, distributor, queryables, nil, log.NewNopLogger()) + testRangeQuery(t, queryable, queryEngine, through, query) + }) } } } @@ -518,7 +515,7 @@ func TestQuerierMetric(t *testing.T) { overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) require.NoError(t, err) - chunkStore, _ := makeMockChunkStore(t, 24) + chunkStore, _ := makeMockChunkStore(t, 24, promchunk.PrometheusXorChunk) distributor := mockDistibutorFor(t, chunkStore.chunks) queryables := []QueryableWithFilter{} @@ -627,47 +624,49 @@ func TestNoHistoricalQueryToIngester(t *testing.T) { // Disable active query tracker to avoid mmap error. 
cfg.ActiveQueryTrackerDir = "" for _, thanosEngine := range []bool{true, false} { - for _, c := range testCases { - cfg.QueryIngestersWithin = c.queryIngestersWithin - t.Run(fmt.Sprintf("thanosEngine=%t,queryIngestersWithin=%v, test=%s", thanosEngine, c.queryIngestersWithin, c.name), func(t *testing.T) { - var queryEngine promql.QueryEngine - if thanosEngine { - queryEngine = engine.New(engine.Opts{ - EngineOpts: opts, - LogicalOptimizers: logicalplan.AllOptimizers, - }) - } else { - queryEngine = promql.NewEngine(opts) - } + for _, encoding := range encodings { + for _, c := range testCases { + cfg.QueryIngestersWithin = c.queryIngestersWithin + t.Run(fmt.Sprintf("thanosEngine=%t,encoding=%s,queryIngestersWithin=%v, test=%s", thanosEngine, encoding.String(), c.queryIngestersWithin, c.name), func(t *testing.T) { + var queryEngine promql.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } - chunkStore, _ := makeMockChunkStore(t, 24) - distributor := &errDistributor{} + chunkStore, _ := makeMockChunkStore(t, 24, encoding) + distributor := &errDistributor{} - overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) - require.NoError(t, err) + overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) + require.NoError(t, err) - ctx := user.InjectOrgID(context.Background(), "0") - queryable, _, _ := New(cfg, overrides, distributor, []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))}, nil, log.NewNopLogger()) - query, err := queryEngine.NewRangeQuery(ctx, queryable, nil, "dummy", c.mint, c.maxt, 1*time.Minute) - require.NoError(t, err) + ctx := user.InjectOrgID(context.Background(), "0") + queryable, _, _ := New(cfg, overrides, distributor, []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))}, nil, log.NewNopLogger()) + query, err := queryEngine.NewRangeQuery(ctx, queryable, nil, "dummy", c.mint, c.maxt, 1*time.Minute) + require.NoError(t, err) - r := query.Exec(ctx) - _, err = r.Matrix() - - if c.hitIngester { - // If the ingester was hit, the distributor always returns errDistributorError. Prometheus - // wrap any Select() error into "expanding series", so we do wrap it as well to have a match. - require.Error(t, err) - if !thanosEngine { - require.Equal(t, errors.Wrap(errDistributorError, "expanding series").Error(), err.Error()) + r := query.Exec(ctx) + _, err = r.Matrix() + + if c.hitIngester { + // If the ingester was hit, the distributor always returns errDistributorError. Prometheus + // wrap any Select() error into "expanding series", so we do wrap it as well to have a match. + require.Error(t, err) + if !thanosEngine { + require.Equal(t, errors.Wrap(errDistributorError, "expanding series").Error(), err.Error()) + } else { + require.Equal(t, errDistributorError.Error(), err.Error()) + } } else { - require.Equal(t, errDistributorError.Error(), err.Error()) + // If the ingester was hit, there would have been an error from errDistributor. + require.NoError(t, err) } - } else { - // If the ingester was hit, there would have been an error from errDistributor. 
-				require.NoError(t, err)
-			}
-		})
+				})
+			}
 		}
 	}
 }
diff --git a/pkg/querier/store_gateway_client.go b/pkg/querier/store_gateway_client.go
index 6800fc0382..df246f000e 100644
--- a/pkg/querier/store_gateway_client.go
+++ b/pkg/querier/store_gateway_client.go
@@ -37,7 +37,7 @@ func dialStoreGatewayClient(clientCfg grpcclient.Config, addr string, requestDur
 		return nil, err
 	}
 
-	conn, err := grpc.Dial(addr, opts...)
+	conn, err := grpc.NewClient(addr, opts...)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to dial store-gateway %s", addr)
 	}
diff --git a/pkg/querier/testutils.go b/pkg/querier/testutils.go
index 235f63a9f5..a335bff618 100644
--- a/pkg/querier/testutils.go
+++ b/pkg/querier/testutils.go
@@ -109,15 +109,44 @@ func DefaultLimitsConfig() validation.Limits {
 	return limits
 }
 
-func ConvertToChunks(t *testing.T, samples []cortexpb.Sample) []client.Chunk {
+func ConvertToChunks(t *testing.T, samples []cortexpb.Sample, histograms []*cortexpb.Histogram) []client.Chunk {
 	// We need to make sure that there is at least one chunk present,
 	// else no series will be selected.
-	chk := chunkenc.NewXORChunk()
-	appender, err := chk.Appender()
-	require.NoError(t, err)
-
-	for _, s := range samples {
-		appender.Append(s.TimestampMs, s.Value)
+	var chk chunkenc.Chunk
+	if len(samples) > 0 {
+		chk = chunkenc.NewXORChunk()
+		appender, err := chk.Appender()
+		require.NoError(t, err)
+
+		for _, s := range samples {
+			appender.Append(s.TimestampMs, s.Value)
+		}
+	} else if len(histograms) > 0 {
+		if histograms[0].IsFloatHistogram() {
+			chk = chunkenc.NewFloatHistogramChunk()
+			appender, err := chk.Appender()
+			require.NoError(t, err)
+			for _, h := range histograms {
+				_, _, _, err = appender.AppendFloatHistogram(nil, h.TimestampMs, cortexpb.FloatHistogramProtoToFloatHistogram(*h), true)
+				require.NoError(t, err)
+			}
+		} else {
+			chk = chunkenc.NewHistogramChunk()
+			appender, err := chk.Appender()
+			require.NoError(t, err)
+			for _, h := range histograms {
+				_, _, _, err = appender.AppendHistogram(nil, h.TimestampMs, cortexpb.HistogramProtoToHistogram(*h), true)
+				require.NoError(t, err)
+			}
+		}
 	}
-	c := chunk.NewChunk(nil, chk, model.Time(samples[0].TimestampMs), model.Time(samples[len(samples)-1].TimestampMs))
+	// Derive the chunk bounds from whichever input is present; indexing
+	// samples unconditionally would panic for histogram-only input.
+	mint, maxt := model.Time(0), model.Time(0)
+	if len(samples) > 0 {
+		mint, maxt = model.Time(samples[0].TimestampMs), model.Time(samples[len(samples)-1].TimestampMs)
+	} else if len(histograms) > 0 {
+		mint, maxt = model.Time(histograms[0].TimestampMs), model.Time(histograms[len(histograms)-1].TimestampMs)
+	}
+	c := chunk.NewChunk(nil, chk, mint, maxt)
diff --git a/pkg/querier/tripperware/merge.go b/pkg/querier/tripperware/merge.go
index 84dbb81afb..2ae5a9793c 100644
--- a/pkg/querier/tripperware/merge.go
+++ b/pkg/querier/tripperware/merge.go
@@ -30,7 +30,18 @@ func MergeSampleStreams(output map[string]SampleStream, sampleStreams []SampleSt
 				stream.Samples = sliceSamples(stream.Samples, existingEndTs)
 			} // else there is no overlap, yay!
 		}
+		// Same for histograms as for samples above.
+		if len(existing.Histograms) > 0 && len(stream.Histograms) > 0 {
+			existingEndTs := existing.Histograms[len(existing.Histograms)-1].GetTimestampMs()
+			if existingEndTs == stream.Histograms[0].GetTimestampMs() {
+				stream.Histograms = stream.Histograms[1:]
+			} else if existingEndTs > stream.Histograms[0].GetTimestampMs() {
+				stream.Histograms = sliceHistograms(stream.Histograms, existingEndTs)
+			}
+		}
 		existing.Samples = append(existing.Samples, stream.Samples...)
+		existing.Histograms = append(existing.Histograms, stream.Histograms...)
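+		// A hypothetical trace of the overlap handling above: if the existing
+		// histograms end at ts=3 and the incoming stream also starts at ts=3,
+		// the duplicated first point is dropped; if the incoming stream starts
+		// at ts=2, sliceHistograms drops every incoming point up to and
+		// including ts=3, so only strictly newer points get appended.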
+		output[metric] = existing
 	}
 }
@@ -55,3 +66,23 @@ func sliceSamples(samples []cortexpb.Sample, minTs int64) []cortexpb.Sample {
 
 	return samples[searchResult:]
 }
+
+// sliceHistograms assumes the given histograms are sorted by timestamp in ascending
+// order and returns a sub slice whose first element's timestamp is the smallest
+// timestamp strictly bigger than the given minTs. An empty slice is returned if
+// minTs is bigger than or equal to all the timestamps in histograms.
+func sliceHistograms(histograms []SampleHistogramPair, minTs int64) []SampleHistogramPair {
+	if len(histograms) <= 0 || minTs < histograms[0].GetTimestampMs() {
+		return histograms
+	}
+
+	if minTs > histograms[len(histograms)-1].GetTimestampMs() {
+		return histograms[len(histograms):]
+	}
+
+	searchResult := sort.Search(len(histograms), func(i int) bool {
+		return histograms[i].GetTimestampMs() > minTs
+	})
+
+	return histograms[searchResult:]
+}
diff --git a/pkg/querier/tripperware/merge_test.go b/pkg/querier/tripperware/merge_test.go
index 8b56c6b708..124cfd08b5 100644
--- a/pkg/querier/tripperware/merge_test.go
+++ b/pkg/querier/tripperware/merge_test.go
@@ -10,6 +10,56 @@ import (
 	ingester_client "github.com/cortexproject/cortex/pkg/ingester/client"
 )
 
+var (
+	testHistogram1 = SampleHistogram{
+		Count: 13.5,
+		Sum:   3897.1,
+		Buckets: []*HistogramBucket{
+			{
+				Boundaries: 1,
+				Lower:      -4870.992343051145,
+				Upper:      -4466.7196729968955,
+				Count:      1,
+			},
+			{
+				Boundaries: 1,
+				Lower:      -861.0779292198035,
+				Upper:      -789.6119426088657,
+				Count:      2,
+			},
+			{
+				Boundaries: 1,
+				Lower:      -558.3399591246119,
+				Upper:      -512,
+				Count:      3,
+			},
+			{
+				Boundaries: 0,
+				Lower:      2048,
+				Upper:      2233.3598364984477,
+				Count:      1.5,
+			},
+			{
+				Boundaries: 0,
+				Lower:      2896.3093757400984,
+				Upper:      3158.4477704354626,
+				Count:      2.5,
+			},
+			{
+				Boundaries: 0,
+				Lower:      4466.7196729968955,
+				Upper:      4870.992343051145,
+				Count:      3.5,
+			},
+		},
+	}
+
+	testHistogram2 = SampleHistogram{
+		Count: 10,
+		Sum:   100,
+	}
+)
+
 func TestMergeSampleStreams(t *testing.T) {
 	t.Parallel()
 	lbls := labels.FromMap(map[string]string{
@@ -44,6 +94,50 @@ func TestMergeSampleStreams(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "one sample stream with histograms",
+			sampleStreams: []SampleStream{
+				{
+					Labels: cortexpb.FromLabelsToLabelAdapters(lbls),
+					Histograms: []SampleHistogramPair{
+						{Histogram: testHistogram1, TimestampMs: 0},
+					},
+				},
+			},
+			expectedOutput: map[string]SampleStream{
+				ingester_client.LabelsToKeyString(lbls): {
+					Labels: cortexpb.FromLabelsToLabelAdapters(lbls),
+					Histograms: []SampleHistogramPair{
+						{Histogram: testHistogram1, TimestampMs: 0},
+					},
+				},
+			},
+		},
+		{
+			name: "one sample stream with both samples and histograms",
+			sampleStreams: []SampleStream{
+				{
+					Labels: cortexpb.FromLabelsToLabelAdapters(lbls),
+					Samples: []cortexpb.Sample{
+						{Value: 0, TimestampMs: 0},
+					},
+					Histograms: []SampleHistogramPair{
+						{Histogram: testHistogram1, TimestampMs: 0},
+					},
+				},
+			},
+			expectedOutput: map[string]SampleStream{
+				ingester_client.LabelsToKeyString(lbls): {
+					Labels: cortexpb.FromLabelsToLabelAdapters(lbls),
+					Samples: []cortexpb.Sample{
+						{Value: 0, TimestampMs: 0},
+					},
+					Histograms: []SampleHistogramPair{
+						{Histogram: testHistogram1, TimestampMs: 0},
+					},
+				},
+			},
+		},
 		{
 			name: "two sample streams with only one metric",
 			sampleStreams: []SampleStream{
@@ -76,6 +170,86 @@ func TestMergeSampleStreams(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "two sample streams with only one metric, histograms",
+			sampleStreams: []SampleStream{
+				{
+					Labels: 
cortexpb.FromLabelsToLabelAdapters(lbls), + Histograms: []SampleHistogramPair{ + {Histogram: testHistogram1, TimestampMs: 0}, + {Histogram: testHistogram1, TimestampMs: 2}, + {Histogram: testHistogram1, TimestampMs: 3}, + }, + }, + { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls), + Histograms: []SampleHistogramPair{ + {Histogram: testHistogram1, TimestampMs: 0}, + {Histogram: testHistogram1, TimestampMs: 1}, + {Histogram: testHistogram1, TimestampMs: 4}, + }, + }, + }, + expectedOutput: map[string]SampleStream{ + ingester_client.LabelsToKeyString(lbls): { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls), + Histograms: []SampleHistogramPair{ + {Histogram: testHistogram1, TimestampMs: 0}, + {Histogram: testHistogram1, TimestampMs: 2}, + {Histogram: testHistogram1, TimestampMs: 3}, + {Histogram: testHistogram1, TimestampMs: 4}, + }, + }, + }, + }, + { + name: "two sample streams with only one metric, samples and histograms", + sampleStreams: []SampleStream{ + { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls), + Samples: []cortexpb.Sample{ + {Value: 0, TimestampMs: 0}, + {Value: 2, TimestampMs: 2}, + {Value: 3, TimestampMs: 3}, + }, + Histograms: []SampleHistogramPair{ + {Histogram: testHistogram1, TimestampMs: 0}, + {Histogram: testHistogram1, TimestampMs: 2}, + {Histogram: testHistogram1, TimestampMs: 3}, + }, + }, + { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls), + Samples: []cortexpb.Sample{ + {Value: 0, TimestampMs: 0}, + {Value: 1, TimestampMs: 1}, + {Value: 4, TimestampMs: 4}, + }, + Histograms: []SampleHistogramPair{ + {Histogram: testHistogram1, TimestampMs: 0}, + {Histogram: testHistogram1, TimestampMs: 1}, + {Histogram: testHistogram1, TimestampMs: 4}, + }, + }, + }, + expectedOutput: map[string]SampleStream{ + ingester_client.LabelsToKeyString(lbls): { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls), + Samples: []cortexpb.Sample{ + {Value: 0, TimestampMs: 0}, + {Value: 2, TimestampMs: 2}, + {Value: 3, TimestampMs: 3}, + {Value: 4, TimestampMs: 4}, + }, + Histograms: []SampleHistogramPair{ + {Histogram: testHistogram1, TimestampMs: 0}, + {Histogram: testHistogram1, TimestampMs: 2}, + {Histogram: testHistogram1, TimestampMs: 3}, + {Histogram: testHistogram1, TimestampMs: 4}, + }, + }, + }, + }, { name: "two sample streams with two metrics", sampleStreams: []SampleStream{ @@ -130,6 +304,60 @@ func TestMergeSampleStreams(t *testing.T) { }, }, }, + { + name: "two sample streams with two metrics, histograms", + sampleStreams: []SampleStream{ + { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls), + Histograms: []SampleHistogramPair{ + {Histogram: testHistogram1, TimestampMs: 0}, + {Histogram: testHistogram1, TimestampMs: 2}, + {Histogram: testHistogram1, TimestampMs: 3}, + }, + }, + { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls), + Histograms: []SampleHistogramPair{ + {Histogram: testHistogram2, TimestampMs: 1}, + {Histogram: testHistogram2, TimestampMs: 4}, + }, + }, + { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls1), + Histograms: []SampleHistogramPair{ + {Histogram: testHistogram1, TimestampMs: 0}, + {Histogram: testHistogram1, TimestampMs: 1}, + {Histogram: testHistogram1, TimestampMs: 4}, + }, + }, + { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls1), + Histograms: []SampleHistogramPair{ + {Histogram: testHistogram2, TimestampMs: 2}, + {Histogram: testHistogram2, TimestampMs: 3}, + }, + }, + }, + expectedOutput: map[string]SampleStream{ + ingester_client.LabelsToKeyString(lbls): { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls), + Histograms: 
[]SampleHistogramPair{ + {Histogram: testHistogram1, TimestampMs: 0}, + {Histogram: testHistogram1, TimestampMs: 2}, + {Histogram: testHistogram1, TimestampMs: 3}, + {Histogram: testHistogram2, TimestampMs: 4}, + }, + }, + ingester_client.LabelsToKeyString(lbls1): { + Labels: cortexpb.FromLabelsToLabelAdapters(lbls1), + Histograms: []SampleHistogramPair{ + {Histogram: testHistogram1, TimestampMs: 0}, + {Histogram: testHistogram1, TimestampMs: 1}, + {Histogram: testHistogram1, TimestampMs: 4}, + }, + }, + }, + }, } { tc := tc t.Run(tc.name, func(t *testing.T) { @@ -219,3 +447,152 @@ func TestSliceSamples(t *testing.T) { }) } } + +func TestSliceHistograms(t *testing.T) { + t.Parallel() + for _, tc := range []struct { + name string + histograms []SampleHistogramPair + minTs int64 + expectedHistograms []SampleHistogramPair + }{ + {name: "empty histograms"}, + { + name: "minTs smaller than first histogram's timestamp", + histograms: []SampleHistogramPair{ + { + TimestampMs: 1, + Histogram: testHistogram1, + }, + }, + minTs: 0, + expectedHistograms: []SampleHistogramPair{ + { + TimestampMs: 1, + Histogram: testHistogram1, + }, + }, + }, + { + name: "input histograms are not sorted, return all histograms", + histograms: []SampleHistogramPair{ + { + TimestampMs: 3, + Histogram: testHistogram1, + }, + { + TimestampMs: 1, + Histogram: testHistogram1, + }, + }, + minTs: 2, + expectedHistograms: []SampleHistogramPair{ + { + TimestampMs: 3, + Histogram: testHistogram1, + }, + { + TimestampMs: 1, + Histogram: testHistogram1, + }, + }, + }, + { + name: "minTs greater than the last histogram's timestamp", + histograms: []SampleHistogramPair{ + { + TimestampMs: 1, + Histogram: testHistogram1, + }, + { + TimestampMs: 2, + Histogram: testHistogram1, + }, + }, + minTs: 3, + expectedHistograms: []SampleHistogramPair{}, + }, + { + name: "input histograms not sorted, minTs greater than the last histogram's timestamp", + histograms: []SampleHistogramPair{ + { + TimestampMs: 0, + Histogram: testHistogram1, + }, + { + TimestampMs: 3, + Histogram: testHistogram1, + }, + { + TimestampMs: 1, + Histogram: testHistogram1, + }, + }, + minTs: 2, + expectedHistograms: []SampleHistogramPair{}, + }, + { + name: "input histograms are sorted", + histograms: []SampleHistogramPair{ + { + TimestampMs: 2, + Histogram: testHistogram1, + }, + { + TimestampMs: 3, + Histogram: testHistogram1, + }, + { + TimestampMs: 4, + Histogram: testHistogram1, + }, + }, + minTs: 1, + expectedHistograms: []SampleHistogramPair{ + { + TimestampMs: 2, + Histogram: testHistogram1, + }, + { + TimestampMs: 3, + Histogram: testHistogram1, + }, + { + TimestampMs: 4, + Histogram: testHistogram1, + }, + }, + }, + { + name: "input histograms are sorted, get sliced histograms", + histograms: []SampleHistogramPair{ + { + TimestampMs: 1, + Histogram: testHistogram1, + }, + { + TimestampMs: 2, + Histogram: testHistogram1, + }, + { + TimestampMs: 3, + Histogram: testHistogram1, + }, + }, + minTs: 2, + expectedHistograms: []SampleHistogramPair{ + { + TimestampMs: 3, + Histogram: testHistogram1, + }, + }, + }, + } { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + actual := sliceHistograms(tc.histograms, tc.minTs) + assert.Equal(t, tc.expectedHistograms, actual) + }) + } +} diff --git a/pkg/querier/tripperware/query.go b/pkg/querier/tripperware/query.go index 725f3dcd1e..af3e8029ad 100644 --- a/pkg/querier/tripperware/query.go +++ b/pkg/querier/tripperware/query.go @@ -4,6 +4,7 @@ import ( "bytes" "compress/gzip" "context" + "fmt" "io" "net/http" 
"sort" @@ -16,8 +17,10 @@ import ( "github.com/gogo/protobuf/proto" jsoniter "github.com/json-iterator/go" "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/jsonutil" "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/cortexpb" @@ -84,28 +87,34 @@ type Request interface { } func decodeSampleStream(ptr unsafe.Pointer, iter *jsoniter.Iterator) { - lbls := labels.Labels{} - samples := []cortexpb.Sample{} + ss := (*SampleStream)(ptr) for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { switch field { case "metric": - iter.ReadVal(&lbls) + metricString := iter.ReadAny().ToString() + lbls := labels.Labels{} + if err := json.UnmarshalFromString(metricString, &lbls); err != nil { + iter.ReportError("unmarshal SampleStream", err.Error()) + return + } + ss.Labels = cortexpb.FromLabelsToLabelAdapters(lbls) case "values": - for { - if !iter.ReadArray() { - break - } + for iter.ReadArray() { s := cortexpb.Sample{} cortexpb.SampleJsoniterDecode(unsafe.Pointer(&s), iter) - samples = append(samples, s) + ss.Samples = append(ss.Samples, s) } + case "histograms": + for iter.ReadArray() { + h := SampleHistogramPair{} + unmarshalSampleHistogramPairJSON(unsafe.Pointer(&h), iter) + ss.Histograms = append(ss.Histograms, h) + } + default: + iter.ReportError("unmarshal SampleStream", fmt.Sprint("unexpected key:", field)) + return } } - - *(*SampleStream)(ptr) = SampleStream{ - Samples: samples, - Labels: cortexpb.FromLabelsToLabelAdapters(lbls), - } } func encodeSampleStream(ptr unsafe.Pointer, stream *jsoniter.Stream) { @@ -119,33 +128,34 @@ func encodeSampleStream(ptr unsafe.Pointer, stream *jsoniter.Stream) { return } stream.SetBuffer(append(stream.Buffer(), lbls...)) - stream.WriteMore() - stream.WriteObjectField(`values`) - stream.WriteArrayStart() - for i, sample := range ss.Samples { - if i != 0 { - stream.WriteMore() + if len(ss.Samples) > 0 { + stream.WriteMore() + stream.WriteObjectField(`values`) + stream.WriteArrayStart() + for i, sample := range ss.Samples { + if i != 0 { + stream.WriteMore() + } + cortexpb.SampleJsoniterEncode(unsafe.Pointer(&sample), stream) } - cortexpb.SampleJsoniterEncode(unsafe.Pointer(&sample), stream) + stream.WriteArrayEnd() } - stream.WriteArrayEnd() - stream.WriteObjectEnd() -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (s *SampleStream) UnmarshalJSON(data []byte) error { - var stream struct { - Metric labels.Labels `json:"metric"` - Values []cortexpb.Sample `json:"values"` - } - if err := json.Unmarshal(data, &stream); err != nil { - return err + if len(ss.Histograms) > 0 { + stream.WriteMore() + stream.WriteObjectField(`histograms`) + stream.WriteArrayStart() + for i, h := range ss.Histograms { + if i > 0 { + stream.WriteMore() + } + marshalSampleHistogramPairJSON(unsafe.Pointer(&h), stream) + } + stream.WriteArrayEnd() } - s.Labels = cortexpb.FromLabelsToLabelAdapters(stream.Metric) - s.Samples = stream.Values - return nil + + stream.WriteObjectEnd() } func PrometheusResponseQueryableSamplesStatsPerStepJsoniterDecode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { @@ -184,8 +194,14 @@ func PrometheusResponseQueryableSamplesStatsPerStepJsoniterEncode(ptr unsafe.Poi func init() { jsoniter.RegisterTypeEncoderFunc("tripperware.PrometheusResponseQueryableSamplesStatsPerStep", PrometheusResponseQueryableSamplesStatsPerStepJsoniterEncode, func(unsafe.Pointer) bool { return false }) jsoniter.RegisterTypeDecoderFunc("tripperware.PrometheusResponseQueryableSamplesStatsPerStep", PrometheusResponseQueryableSamplesStatsPerStepJsoniterDecode) - jsoniter.RegisterTypeEncoderFunc("tripperware.SampleStream", encodeSampleStream, func(unsafe.Pointer) bool { return false }) + jsoniter.RegisterTypeEncoderFunc("tripperware.SampleStream", encodeSampleStream, marshalJSONIsEmpty) jsoniter.RegisterTypeDecoderFunc("tripperware.SampleStream", decodeSampleStream) + jsoniter.RegisterTypeEncoderFunc("tripperware.SampleHistogramPair", marshalSampleHistogramPairJSON, marshalJSONIsEmpty) + jsoniter.RegisterTypeDecoderFunc("tripperware.SampleHistogramPair", unmarshalSampleHistogramPairJSON) +} + +func marshalJSONIsEmpty(ptr unsafe.Pointer) bool { + return false } func EncodeTime(t int64) string { @@ -266,3 +282,171 @@ func StatsMerge(stats map[int64]*PrometheusResponseQueryableSamplesStatsPerStep) return result } + +// Adapted from https://github.com/prometheus/client_golang/blob/4b158abea9470f75b6f07460cdc2189b91914562/api/prometheus/v1/api.go#L84. 
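+// The expected input shape, shown here with hypothetical values for illustration,
+// is a two-element array holding the timestamp in seconds and a histogram object
+// whose floats are encoded as strings:
+//
+//	[1719856710.123, {"count": "4", "sum": "10.5", "buckets": [[1, "1", "2", "4"]]}]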
+func unmarshalSampleHistogramPairJSON(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + p := (*SampleHistogramPair)(ptr) + if !iter.ReadArray() { + iter.ReportError("unmarshal SampleHistogramPair", "SampleHistogramPair must be [timestamp, {histogram}]") + return + } + p.TimestampMs = int64(model.Time(iter.ReadFloat64() * float64(time.Second/time.Millisecond))) + + if !iter.ReadArray() { + iter.ReportError("unmarshal SampleHistogramPair", "SamplePair missing histogram") + return + } + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + switch key { + case "count": + f, err := strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + iter.ReportError("unmarshal SampleHistogramPair", "count of histogram is not a float") + return + } + p.Histogram.Count = f + case "sum": + f, err := strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + iter.ReportError("unmarshal SampleHistogramPair", "sum of histogram is not a float") + return + } + p.Histogram.Sum = f + case "buckets": + for iter.ReadArray() { + b, err := unmarshalHistogramBucket(iter) + if err != nil { + iter.ReportError("unmarshal HistogramBucket", err.Error()) + return + } + p.Histogram.Buckets = append(p.Histogram.Buckets, b) + } + default: + iter.ReportError("unmarshal SampleHistogramPair", fmt.Sprint("unexpected key in histogram:", key)) + return + } + } + if iter.ReadArray() { + iter.ReportError("unmarshal SampleHistogramPair", "SampleHistogramPair has too many values, must be [timestamp, {histogram}]") + return + } +} + +// Adapted from https://github.com/prometheus/client_golang/blob/4b158abea9470f75b6f07460cdc2189b91914562/api/prometheus/v1/api.go#L252. +func unmarshalHistogramBucket(iter *jsoniter.Iterator) (*HistogramBucket, error) { + b := HistogramBucket{} + if !iter.ReadArray() { + return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]") + } + boundaries, err := iter.ReadNumber().Int64() + if err != nil { + return nil, err + } + b.Boundaries = int32(boundaries) + if !iter.ReadArray() { + return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]") + } + f, err := strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + return nil, err + } + b.Lower = f + if !iter.ReadArray() { + return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]") + } + f, err = strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + return nil, err + } + b.Upper = f + if !iter.ReadArray() { + return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]") + } + f, err = strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + return nil, err + } + b.Count = f + if iter.ReadArray() { + return nil, errors.New("HistogramBucket has too many values, must be [boundaries, lower, upper, count]") + } + return &b, nil +} + +// Adapted from https://github.com/prometheus/client_golang/blob/4b158abea9470f75b6f07460cdc2189b91914562/api/prometheus/v1/api.go#L137. +func marshalSampleHistogramPairJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + p := *((*SampleHistogramPair)(ptr)) + stream.WriteArrayStart() + stream.WriteFloat64(float64(p.TimestampMs) / float64(time.Second/time.Millisecond)) + stream.WriteMore() + marshalHistogram(p.Histogram, stream) + stream.WriteArrayEnd() +} + +// MarshalHistogram marshals a histogram value using the passed jsoniter stream. 
+// It writes something like: +// +// { +// "count": "42", +// "sum": "34593.34", +// "buckets": [ +// [ 3, "-0.25", "0.25", "3"], +// [ 0, "0.25", "0.5", "12"], +// [ 0, "0.5", "1", "21"], +// [ 0, "2", "4", "6"] +// ] +// } +// +// The 1st element in each bucket array determines if the boundaries are +// inclusive (AKA closed) or exclusive (AKA open): +// +// 0: lower exclusive, upper inclusive +// 1: lower inclusive, upper exclusive +// 2: both exclusive +// 3: both inclusive +// +// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is +// the bucket count. +// Adapted from https://github.com/prometheus/client_golang/blob/4b158abea9470f75b6f07460cdc2189b91914562/api/prometheus/v1/api.go#L329 +func marshalHistogram(h SampleHistogram, stream *jsoniter.Stream) { + stream.WriteObjectStart() + stream.WriteObjectField(`count`) + jsonutil.MarshalFloat(h.Count, stream) + stream.WriteMore() + stream.WriteObjectField(`sum`) + jsonutil.MarshalFloat(h.Sum, stream) + + bucketFound := false + for _, bucket := range h.Buckets { + if bucket.Count == 0 { + continue // No need to expose empty buckets in JSON. + } + stream.WriteMore() + if !bucketFound { + stream.WriteObjectField(`buckets`) + stream.WriteArrayStart() + } + bucketFound = true + marshalHistogramBucket(*bucket, stream) + } + + if bucketFound { + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} + +// marshalHistogramBucket writes something like: [ 3, "-0.25", "0.25", "3"] +// See marshalHistogram to understand what the numbers mean. +// Adapted from https://github.com/prometheus/client_golang/blob/4b158abea9470f75b6f07460cdc2189b91914562/api/prometheus/v1/api.go#L294. +func marshalHistogramBucket(b HistogramBucket, stream *jsoniter.Stream) { + stream.WriteArrayStart() + stream.WriteInt32(b.Boundaries) + stream.WriteMore() + jsonutil.MarshalFloat(b.Lower, stream) + stream.WriteMore() + jsonutil.MarshalFloat(b.Upper, stream) + stream.WriteMore() + jsonutil.MarshalFloat(b.Count, stream) + stream.WriteArrayEnd() +} diff --git a/pkg/querier/tripperware/query.pb.go b/pkg/querier/tripperware/query.pb.go index f8fb8f9203..5c46214404 100644 --- a/pkg/querier/tripperware/query.pb.go +++ b/pkg/querier/tripperware/query.pb.go @@ -4,6 +4,7 @@ package tripperware import ( + encoding_binary "encoding/binary" fmt "fmt" cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" github_com_cortexproject_cortex_pkg_cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" @@ -28,8 +29,9 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type SampleStream struct { - Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"metric"` - Samples []cortexpb.Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"values"` + Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"metric"` + Samples []cortexpb.Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"values"` + Histograms []SampleHistogramPair `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"` } func (m *SampleStream) Reset() { *m = SampleStream{} } @@ -71,6 +73,190 @@ func (m *SampleStream) GetSamples() []cortexpb.Sample { return nil } +func (m *SampleStream) GetHistograms() []SampleHistogramPair { + if m != nil { + return m.Histograms + } + return nil +} + +type 
SampleHistogramPair struct { + TimestampMs int64 `protobuf:"varint,1,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` + Histogram SampleHistogram `protobuf:"bytes,2,opt,name=histogram,proto3" json:"histogram"` +} + +func (m *SampleHistogramPair) Reset() { *m = SampleHistogramPair{} } +func (*SampleHistogramPair) ProtoMessage() {} +func (*SampleHistogramPair) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{1} +} +func (m *SampleHistogramPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SampleHistogramPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SampleHistogramPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SampleHistogramPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_SampleHistogramPair.Merge(m, src) +} +func (m *SampleHistogramPair) XXX_Size() int { + return m.Size() +} +func (m *SampleHistogramPair) XXX_DiscardUnknown() { + xxx_messageInfo_SampleHistogramPair.DiscardUnknown(m) +} + +var xxx_messageInfo_SampleHistogramPair proto.InternalMessageInfo + +func (m *SampleHistogramPair) GetTimestampMs() int64 { + if m != nil { + return m.TimestampMs + } + return 0 +} + +func (m *SampleHistogramPair) GetHistogram() SampleHistogram { + if m != nil { + return m.Histogram + } + return SampleHistogram{} +} + +type SampleHistogram struct { + Count float64 `protobuf:"fixed64,1,opt,name=count,proto3" json:"count,omitempty"` + Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"` + Buckets []*HistogramBucket `protobuf:"bytes,3,rep,name=buckets,proto3" json:"buckets,omitempty"` +} + +func (m *SampleHistogram) Reset() { *m = SampleHistogram{} } +func (*SampleHistogram) ProtoMessage() {} +func (*SampleHistogram) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{2} +} +func (m *SampleHistogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SampleHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SampleHistogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SampleHistogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_SampleHistogram.Merge(m, src) +} +func (m *SampleHistogram) XXX_Size() int { + return m.Size() +} +func (m *SampleHistogram) XXX_DiscardUnknown() { + xxx_messageInfo_SampleHistogram.DiscardUnknown(m) +} + +var xxx_messageInfo_SampleHistogram proto.InternalMessageInfo + +func (m *SampleHistogram) GetCount() float64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *SampleHistogram) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *SampleHistogram) GetBuckets() []*HistogramBucket { + if m != nil { + return m.Buckets + } + return nil +} + +type HistogramBucket struct { + Boundaries int32 `protobuf:"varint,1,opt,name=boundaries,proto3" json:"boundaries,omitempty"` + Lower float64 `protobuf:"fixed64,2,opt,name=lower,proto3" json:"lower,omitempty"` + Upper float64 `protobuf:"fixed64,3,opt,name=upper,proto3" json:"upper,omitempty"` + Count float64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` +} + +func (m *HistogramBucket) Reset() { *m = HistogramBucket{} } +func (*HistogramBucket) 
ProtoMessage() {} +func (*HistogramBucket) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{3} +} +func (m *HistogramBucket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HistogramBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HistogramBucket.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HistogramBucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_HistogramBucket.Merge(m, src) +} +func (m *HistogramBucket) XXX_Size() int { + return m.Size() +} +func (m *HistogramBucket) XXX_DiscardUnknown() { + xxx_messageInfo_HistogramBucket.DiscardUnknown(m) +} + +var xxx_messageInfo_HistogramBucket proto.InternalMessageInfo + +func (m *HistogramBucket) GetBoundaries() int32 { + if m != nil { + return m.Boundaries + } + return 0 +} + +func (m *HistogramBucket) GetLower() float64 { + if m != nil { + return m.Lower + } + return 0 +} + +func (m *HistogramBucket) GetUpper() float64 { + if m != nil { + return m.Upper + } + return 0 +} + +func (m *HistogramBucket) GetCount() float64 { + if m != nil { + return m.Count + } + return 0 +} + type PrometheusResponseStats struct { Samples *PrometheusResponseSamplesStats `protobuf:"bytes,1,opt,name=samples,proto3" json:"samples"` } @@ -78,7 +264,7 @@ type PrometheusResponseStats struct { func (m *PrometheusResponseStats) Reset() { *m = PrometheusResponseStats{} } func (*PrometheusResponseStats) ProtoMessage() {} func (*PrometheusResponseStats) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{1} + return fileDescriptor_5c6ac9b241082464, []int{4} } func (m *PrometheusResponseStats) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -122,7 +308,7 @@ type PrometheusResponseSamplesStats struct { func (m *PrometheusResponseSamplesStats) Reset() { *m = PrometheusResponseSamplesStats{} } func (*PrometheusResponseSamplesStats) ProtoMessage() {} func (*PrometheusResponseSamplesStats) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{2} + return fileDescriptor_5c6ac9b241082464, []int{5} } func (m *PrometheusResponseSamplesStats) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -175,7 +361,7 @@ func (m *PrometheusResponseQueryableSamplesStatsPerStep) Reset() { } func (*PrometheusResponseQueryableSamplesStatsPerStep) ProtoMessage() {} func (*PrometheusResponseQueryableSamplesStatsPerStep) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{3} + return fileDescriptor_5c6ac9b241082464, []int{6} } func (m *PrometheusResponseQueryableSamplesStatsPerStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -226,7 +412,7 @@ type PrometheusResponseHeader struct { func (m *PrometheusResponseHeader) Reset() { *m = PrometheusResponseHeader{} } func (*PrometheusResponseHeader) ProtoMessage() {} func (*PrometheusResponseHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{4} + return fileDescriptor_5c6ac9b241082464, []int{7} } func (m *PrometheusResponseHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -277,7 +463,7 @@ type PrometheusRequestHeader struct { func (m *PrometheusRequestHeader) Reset() { *m = PrometheusRequestHeader{} } func (*PrometheusRequestHeader) ProtoMessage() {} func (*PrometheusRequestHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{5} + 
return fileDescriptor_5c6ac9b241082464, []int{8} } func (m *PrometheusRequestHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -322,6 +508,9 @@ func (m *PrometheusRequestHeader) GetValues() []string { func init() { proto.RegisterType((*SampleStream)(nil), "tripperware.SampleStream") + proto.RegisterType((*SampleHistogramPair)(nil), "tripperware.SampleHistogramPair") + proto.RegisterType((*SampleHistogram)(nil), "tripperware.SampleHistogram") + proto.RegisterType((*HistogramBucket)(nil), "tripperware.HistogramBucket") proto.RegisterType((*PrometheusResponseStats)(nil), "tripperware.PrometheusResponseStats") proto.RegisterType((*PrometheusResponseSamplesStats)(nil), "tripperware.PrometheusResponseSamplesStats") proto.RegisterType((*PrometheusResponseQueryableSamplesStatsPerStep)(nil), "tripperware.PrometheusResponseQueryableSamplesStatsPerStep") @@ -332,38 +521,48 @@ func init() { func init() { proto.RegisterFile("query.proto", fileDescriptor_5c6ac9b241082464) } var fileDescriptor_5c6ac9b241082464 = []byte{ - // 495 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xbf, 0x6e, 0xd4, 0x4e, - 0x10, 0xf6, 0x26, 0xbf, 0xdc, 0x4f, 0xac, 0x23, 0x84, 0x96, 0x20, 0x2e, 0x11, 0xac, 0x8f, 0xab, - 0x22, 0x21, 0x7c, 0x52, 0xa8, 0x80, 0x2a, 0xae, 0x90, 0xf8, 0x77, 0xd8, 0x88, 0x82, 0x06, 0xad, - 0x2f, 0xa3, 0x8b, 0xc1, 0xcb, 0x6e, 0x76, 0xd7, 0x10, 0x3a, 0x1e, 0x81, 0x86, 0x07, 0xa0, 0xe3, - 0x41, 0x28, 0x52, 0x5e, 0x19, 0x51, 0x58, 0x9c, 0xaf, 0x41, 0xae, 0xf2, 0x08, 0xc8, 0x6b, 0x3b, - 0x17, 0xe0, 0x74, 0x28, 0xa2, 0xdb, 0xfd, 0xbe, 0xf9, 0xe6, 0x9b, 0x9d, 0x9d, 0xc1, 0xee, 0x41, - 0x06, 0xea, 0xbd, 0x2f, 0x95, 0x30, 0x82, 0xb8, 0x46, 0x25, 0x52, 0x82, 0x7a, 0xc7, 0x14, 0x6c, - 0x6d, 0x8c, 0xc5, 0x58, 0x58, 0x7c, 0x50, 0x9d, 0xea, 0x90, 0xad, 0x3b, 0xe3, 0xc4, 0xec, 0x67, - 0xb1, 0x3f, 0x12, 0x7c, 0x30, 0x12, 0xca, 0xc0, 0xa1, 0x54, 0xe2, 0x15, 0x8c, 0x4c, 0x73, 0x1b, - 0xc8, 0xd7, 0xe3, 0x96, 0x88, 0x9b, 0x43, 0x2d, 0xed, 0x7f, 0x45, 0x78, 0x3d, 0x62, 0x5c, 0xa6, - 0x10, 0x19, 0x05, 0x8c, 0x93, 0x43, 0xdc, 0x49, 0x59, 0x0c, 0xa9, 0xee, 0xa2, 0xde, 0xea, 0xb6, - 0xbb, 0x73, 0xd9, 0x6f, 0x85, 0xfe, 0xc3, 0x0a, 0x1f, 0xb2, 0x44, 0x05, 0x0f, 0x8e, 0x72, 0xcf, - 0xf9, 0x96, 0x7b, 0xe7, 0x32, 0xae, 0xf5, 0xbb, 0x7b, 0x4c, 0x1a, 0x50, 0x65, 0xee, 0x75, 0x38, - 0x18, 0x95, 0x8c, 0xc2, 0xc6, 0x8f, 0xdc, 0xc5, 0xff, 0x6b, 0x5b, 0x89, 0xee, 0xae, 0x58, 0xeb, - 0x4b, 0x73, 0xeb, 0xba, 0xc4, 0xe0, 0x62, 0xe5, 0x5b, 0x49, 0xdf, 0xb2, 0x34, 0x03, 0x1d, 0xb6, - 0x82, 0x3e, 0xc7, 0x57, 0x87, 0x4a, 0x70, 0x30, 0xfb, 0x90, 0xe9, 0x10, 0xb4, 0x14, 0x6f, 0x34, - 0x44, 0x86, 0x19, 0x4d, 0xc2, 0x79, 0x5a, 0xd4, 0x43, 0xdb, 0xee, 0xce, 0x4d, 0xff, 0x4c, 0x47, - 0xfd, 0x05, 0xb2, 0x3a, 0xda, 0xaa, 0x03, 0xb7, 0xcc, 0xbd, 0x56, 0x3f, 0xb7, 0xfb, 0xb4, 0x82, - 0xe9, 0x72, 0x21, 0x79, 0x82, 0xaf, 0x18, 0x61, 0x58, 0xfa, 0xb4, 0xfa, 0x4a, 0x16, 0xa7, 0x2d, - 0x6b, 0x8b, 0x58, 0x0d, 0x36, 0xcb, 0xdc, 0x5b, 0x1c, 0x10, 0x2e, 0x86, 0xc9, 0x67, 0x84, 0xaf, - 0x2d, 0x64, 0x86, 0xa0, 0x22, 0x03, 0xb2, 0x69, 0xda, 0xbd, 0xbf, 0xbc, 0xee, 0x77, 0xb5, 0xad, - 0xb6, 0x49, 0x11, 0xf4, 0xca, 0xdc, 0x5b, 0x6a, 0x12, 0x2e, 0x65, 0xfb, 0x09, 0x3e, 0xa7, 0x23, - 0xd9, 0xc0, 0x6b, 0xf6, 0x2f, 0xeb, 0xb6, 0x84, 0xf5, 0x85, 0xdc, 0xc0, 0xeb, 0x26, 0xe1, 0xa0, - 0x0d, 0xe3, 0xf2, 0x25, 0xaf, 0xe6, 0xa1, 0x22, 0xdd, 0x53, 0xec, 0x91, 0xee, 0x3f, 0xc3, 0xdd, - 0x3f, 0xad, 0xee, 0x03, 0xdb, 0x03, 0x45, 0x36, 0xf1, 0x7f, 0x8f, 0x19, 0xaf, 0x73, 0x5e, 0x08, - 0xd6, 0xca, 0xdc, 
0x43, 0xb7, 0x42, 0x0b, 0x91, 0xeb, 0xb8, 0xf3, 0xdc, 0xce, 0x8e, 0x6d, 0xd7, - 0x29, 0xd9, 0x80, 0xfd, 0xe8, 0xd7, 0x39, 0x3a, 0xc8, 0x40, 0x9b, 0x7f, 0x4d, 0x1a, 0xec, 0x4e, - 0xa6, 0xd4, 0x39, 0x9e, 0x52, 0xe7, 0x64, 0x4a, 0xd1, 0x87, 0x82, 0xa2, 0x2f, 0x05, 0x45, 0x47, - 0x05, 0x45, 0x93, 0x82, 0xa2, 0xef, 0x05, 0x45, 0x3f, 0x0a, 0xea, 0x9c, 0x14, 0x14, 0x7d, 0x9c, - 0x51, 0x67, 0x32, 0xa3, 0xce, 0xf1, 0x8c, 0x3a, 0x2f, 0xce, 0xee, 0x7d, 0xdc, 0xb1, 0xdb, 0x7a, - 0xfb, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe4, 0x6b, 0xc1, 0x41, 0x1a, 0x04, 0x00, 0x00, + // 646 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4f, 0x4f, 0x13, 0x41, + 0x14, 0xdf, 0x69, 0xa1, 0x84, 0x29, 0x11, 0x32, 0x60, 0x2c, 0x04, 0x67, 0xeb, 0x9e, 0x48, 0x8c, + 0x25, 0xc1, 0xc4, 0x44, 0xbd, 0xc8, 0x9e, 0x48, 0xfc, 0x87, 0x53, 0xe2, 0xc1, 0x8b, 0x99, 0x2d, + 0x93, 0xb2, 0xb2, 0xcb, 0x2c, 0x33, 0xb3, 0x82, 0x9e, 0xfc, 0x08, 0x5e, 0xbc, 0x78, 0xf3, 0xe6, + 0x47, 0xe1, 0xc8, 0x91, 0x78, 0xd8, 0xc8, 0x72, 0x31, 0x3d, 0xf1, 0x11, 0xcc, 0xcc, 0xec, 0xb6, + 0x85, 0x36, 0x35, 0xc4, 0xdb, 0xbc, 0xdf, 0x7b, 0xbf, 0xf7, 0x7b, 0xef, 0xed, 0x7b, 0x0b, 0xeb, + 0x87, 0x29, 0x13, 0x9f, 0x5a, 0x89, 0xe0, 0x8a, 0xa3, 0xba, 0x12, 0x61, 0x92, 0x30, 0x71, 0x44, + 0x05, 0x5b, 0x59, 0xea, 0xf2, 0x2e, 0x37, 0xf8, 0xba, 0x7e, 0xd9, 0x90, 0x95, 0xc7, 0xdd, 0x50, + 0xed, 0xa5, 0x41, 0xab, 0xc3, 0xe3, 0xf5, 0x0e, 0x17, 0x8a, 0x1d, 0x27, 0x82, 0x7f, 0x60, 0x1d, + 0x55, 0x58, 0xeb, 0xc9, 0x7e, 0xb7, 0x74, 0x04, 0xc5, 0xc3, 0x52, 0xbd, 0xef, 0x15, 0x38, 0xd7, + 0xa6, 0x71, 0x12, 0xb1, 0xb6, 0x12, 0x8c, 0xc6, 0xe8, 0x18, 0xd6, 0x22, 0x1a, 0xb0, 0x48, 0x36, + 0x40, 0xb3, 0xba, 0x56, 0xdf, 0x58, 0x6c, 0x95, 0xc4, 0xd6, 0x0b, 0x8d, 0x6f, 0xd3, 0x50, 0xf8, + 0xcf, 0x4f, 0x32, 0xd7, 0xf9, 0x95, 0xb9, 0x37, 0x12, 0xb6, 0xfc, 0xcd, 0x5d, 0x9a, 0x28, 0x26, + 0x7a, 0x99, 0x5b, 0x8b, 0x99, 0x12, 0x61, 0x87, 0x14, 0x7a, 0xe8, 0x09, 0x9c, 0x91, 0xa6, 0x12, + 0xd9, 0xa8, 0x18, 0xe9, 0x85, 0x81, 0xb4, 0x2d, 0xd1, 0xbf, 0xa5, 0x75, 0x35, 0xf5, 0x23, 0x8d, + 0x52, 0x26, 0x49, 0x49, 0x40, 0x3b, 0x10, 0xee, 0x85, 0x52, 0xf1, 0xae, 0xa0, 0xb1, 0x6c, 0x54, + 0x0d, 0xbd, 0xd9, 0x1a, 0x9a, 0x5c, 0x91, 0x61, 0xab, 0x0c, 0x32, 0x6d, 0xa0, 0x22, 0xdd, 0x10, + 0x97, 0x0c, 0xbd, 0xbd, 0xcf, 0x70, 0x71, 0x0c, 0x0d, 0xdd, 0x83, 0x73, 0x2a, 0x8c, 0x99, 0x54, + 0x34, 0x4e, 0xde, 0xc7, 0x7a, 0x50, 0x60, 0xad, 0x4a, 0xea, 0x7d, 0xec, 0xa5, 0x44, 0xcf, 0xe0, + 0x6c, 0x3f, 0x4f, 0xa3, 0xd2, 0x04, 0x6b, 0xf5, 0x8d, 0xd5, 0x49, 0xe5, 0xf8, 0x53, 0xba, 0x14, + 0x32, 0x20, 0x79, 0x87, 0x70, 0xfe, 0x5a, 0x0c, 0x5a, 0x82, 0xd3, 0x1d, 0x9e, 0x1e, 0x28, 0x23, + 0x08, 0x88, 0x35, 0xd0, 0x02, 0xac, 0xca, 0xd4, 0x8a, 0x00, 0xa2, 0x9f, 0xe8, 0x11, 0x9c, 0x09, + 0xd2, 0xce, 0x3e, 0x53, 0xe5, 0x24, 0xae, 0x4a, 0x0f, 0x44, 0x4d, 0x10, 0x29, 0x83, 0x3d, 0x09, + 0xe7, 0xaf, 0xf9, 0x10, 0x86, 0x30, 0xe0, 0xe9, 0xc1, 0x2e, 0x15, 0x21, 0xb3, 0x8d, 0x4e, 0x93, + 0x21, 0x44, 0x97, 0x14, 0xf1, 0x23, 0x26, 0x0a, 0x79, 0x6b, 0x68, 0x34, 0xd5, 0x72, 0x8d, 0xaa, + 0x45, 0x8d, 0x31, 0x28, 0x7f, 0x6a, 0xa8, 0x7c, 0x2f, 0x86, 0x77, 0xb6, 0x05, 0x8f, 0x99, 0xda, + 0x63, 0xa9, 0x24, 0x4c, 0x26, 0xfc, 0x40, 0xb2, 0xb6, 0xa2, 0x4a, 0x22, 0x32, 0x58, 0x08, 0x60, + 0x46, 0x78, 0xff, 0x4a, 0x1f, 0x63, 0x68, 0x36, 0xda, 0xb0, 0xfd, 0x7a, 0x2f, 0x73, 0x4b, 0x7e, + 0x7f, 0x51, 0xbc, 0x6f, 0x15, 0x88, 0x27, 0x13, 0xd1, 0x6b, 0x78, 0x5b, 0x71, 0x45, 0xa3, 0x37, + 0xfa, 0x08, 0x69, 0x10, 0x95, 0x5e, 0xfb, 0x9d, 0xfd, 0xe5, 0x5e, 0xe6, 0x8e, 0x0f, 0x20, 0xe3, + 
0x61, 0xf4, 0x03, 0xc0, 0xd5, 0xb1, 0x9e, 0x6d, 0x26, 0xda, 0x8a, 0x25, 0xc5, 0xba, 0x3f, 0xfd, + 0x47, 0x77, 0xd7, 0xd9, 0xa6, 0xda, 0x22, 0x85, 0xdf, 0xec, 0x65, 0xee, 0x44, 0x11, 0x32, 0xd1, + 0xeb, 0x85, 0xf0, 0x86, 0x8a, 0xfa, 0x73, 0x9a, 0x2b, 0x2c, 0xd6, 0xdf, 0x1a, 0x23, 0xb7, 0x51, + 0x19, 0xb9, 0x0d, 0x6f, 0x07, 0x36, 0x46, 0xa5, 0xb6, 0x18, 0xdd, 0x65, 0x02, 0x2d, 0xc3, 0xa9, + 0x57, 0x34, 0xb6, 0x39, 0x67, 0xfd, 0xe9, 0x5e, 0xe6, 0x82, 0x07, 0xc4, 0x40, 0xe8, 0x2e, 0xac, + 0xbd, 0x35, 0x57, 0x6f, 0xc6, 0xd5, 0x77, 0x16, 0xa0, 0xd7, 0xbe, 0xba, 0x47, 0x87, 0x29, 0x93, + 0xea, 0x7f, 0x93, 0xfa, 0x9b, 0xa7, 0xe7, 0xd8, 0x39, 0x3b, 0xc7, 0xce, 0xe5, 0x39, 0x06, 0x5f, + 0x72, 0x0c, 0x7e, 0xe6, 0x18, 0x9c, 0xe4, 0x18, 0x9c, 0xe6, 0x18, 0xfc, 0xce, 0x31, 0xf8, 0x93, + 0x63, 0xe7, 0x32, 0xc7, 0xe0, 0xeb, 0x05, 0x76, 0x4e, 0x2f, 0xb0, 0x73, 0x76, 0x81, 0x9d, 0x77, + 0xc3, 0x7f, 0xec, 0xa0, 0x66, 0xfe, 0xb3, 0x0f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x33, 0x14, + 0x72, 0x0f, 0xd4, 0x05, 0x00, 0x00, } func (this *SampleStream) Equal(that interface{}) bool { @@ -401,6 +600,109 @@ func (this *SampleStream) Equal(that interface{}) bool { return false } } + if len(this.Histograms) != len(that1.Histograms) { + return false + } + for i := range this.Histograms { + if !this.Histograms[i].Equal(&that1.Histograms[i]) { + return false + } + } + return true +} +func (this *SampleHistogramPair) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SampleHistogramPair) + if !ok { + that2, ok := that.(SampleHistogramPair) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TimestampMs != that1.TimestampMs { + return false + } + if !this.Histogram.Equal(&that1.Histogram) { + return false + } + return true +} +func (this *SampleHistogram) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SampleHistogram) + if !ok { + that2, ok := that.(SampleHistogram) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Count != that1.Count { + return false + } + if this.Sum != that1.Sum { + return false + } + if len(this.Buckets) != len(that1.Buckets) { + return false + } + for i := range this.Buckets { + if !this.Buckets[i].Equal(that1.Buckets[i]) { + return false + } + } + return true +} +func (this *HistogramBucket) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HistogramBucket) + if !ok { + that2, ok := that.(HistogramBucket) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Boundaries != that1.Boundaries { + return false + } + if this.Lower != that1.Lower { + return false + } + if this.Upper != that1.Upper { + return false + } + if this.Count != that1.Count { + return false + } return true } func (this *PrometheusResponseStats) Equal(that interface{}) bool { @@ -554,7 +856,7 @@ func (this *SampleStream) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&tripperware.SampleStream{") s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") if this.Samples != nil { @@ -564,6 +866,51 @@ func (this *SampleStream) GoString() string { } s = append(s, "Samples: "+fmt.Sprintf("%#v", 
vs)+",\n") } + if this.Histograms != nil { + vs := make([]*SampleHistogramPair, len(this.Histograms)) + for i := range vs { + vs[i] = &this.Histograms[i] + } + s = append(s, "Histograms: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SampleHistogramPair) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&tripperware.SampleHistogramPair{") + s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n") + s = append(s, "Histogram: "+strings.Replace(this.Histogram.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SampleHistogram) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&tripperware.SampleHistogram{") + s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n") + s = append(s, "Sum: "+fmt.Sprintf("%#v", this.Sum)+",\n") + if this.Buckets != nil { + s = append(s, "Buckets: "+fmt.Sprintf("%#v", this.Buckets)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *HistogramBucket) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&tripperware.HistogramBucket{") + s = append(s, "Boundaries: "+fmt.Sprintf("%#v", this.Boundaries)+",\n") + s = append(s, "Lower: "+fmt.Sprintf("%#v", this.Lower)+",\n") + s = append(s, "Upper: "+fmt.Sprintf("%#v", this.Upper)+",\n") + s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -653,6 +1000,20 @@ func (m *SampleStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } if len(m.Samples) > 0 { for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { { @@ -684,7 +1045,7 @@ func (m *SampleStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PrometheusResponseStats) Marshal() (dAtA []byte, err error) { +func (m *SampleHistogramPair) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -694,27 +1055,160 @@ func (m *PrometheusResponseStats) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PrometheusResponseStats) MarshalTo(dAtA []byte) (int, error) { +func (m *SampleHistogramPair) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PrometheusResponseStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SampleHistogramPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Samples != nil { - { - size, err := m.Samples.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) + { + size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.TimestampMs != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TimestampMs)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SampleHistogram) Marshal() (dAtA []byte, err error) { + size := m.Size() 
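+	// As with the other gogo/protobuf-generated marshallers in this file, the
+	// buffer is sized up front and MarshalToSizedBuffer fills it back-to-front.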
+ dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SampleHistogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SampleHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Buckets) > 0 { + for iNdEx := len(m.Buckets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Buckets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x11 + } + if m.Count != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Count)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *HistogramBucket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HistogramBucket) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HistogramBucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Count)))) + i-- + dAtA[i] = 0x21 + } + if m.Upper != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Upper)))) + i-- + dAtA[i] = 0x19 + } + if m.Lower != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Lower)))) + i-- + dAtA[i] = 0x11 + } + if m.Boundaries != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Boundaries)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PrometheusResponseStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PrometheusResponseStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PrometheusResponseStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Samples != nil { + { + size, err := m.Samples.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } @@ -901,6 +1395,68 @@ func (m *SampleStream) Size() (n int) { n += 1 + l + sovQuery(uint64(l)) } } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *SampleHistogramPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimestampMs != 0 { + n += 1 + sovQuery(uint64(m.TimestampMs)) + } + l = m.Histogram.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *SampleHistogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != 0 { + n += 9 + } + if m.Sum != 0 { + n += 9 + } + if len(m.Buckets) > 0 { + for _, e := range m.Buckets { + l = e.Size() + n += 1 
+ l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *HistogramBucket) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Boundaries != 0 { + n += 1 + sovQuery(uint64(m.Boundaries)) + } + if m.Lower != 0 { + n += 9 + } + if m.Upper != 0 { + n += 9 + } + if m.Count != 0 { + n += 9 + } return n } @@ -1003,9 +1559,56 @@ func (this *SampleStream) String() string { repeatedStringForSamples += fmt.Sprintf("%v", f) + "," } repeatedStringForSamples += "}" + repeatedStringForHistograms := "[]SampleHistogramPair{" + for _, f := range this.Histograms { + repeatedStringForHistograms += strings.Replace(strings.Replace(f.String(), "SampleHistogramPair", "SampleHistogramPair", 1), `&`, ``, 1) + "," + } + repeatedStringForHistograms += "}" s := strings.Join([]string{`&SampleStream{`, `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, `Samples:` + repeatedStringForSamples + `,`, + `Histograms:` + repeatedStringForHistograms + `,`, + `}`, + }, "") + return s +} +func (this *SampleHistogramPair) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SampleHistogramPair{`, + `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, + `Histogram:` + strings.Replace(strings.Replace(this.Histogram.String(), "SampleHistogram", "SampleHistogram", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SampleHistogram) String() string { + if this == nil { + return "nil" + } + repeatedStringForBuckets := "[]*HistogramBucket{" + for _, f := range this.Buckets { + repeatedStringForBuckets += strings.Replace(f.String(), "HistogramBucket", "HistogramBucket", 1) + "," + } + repeatedStringForBuckets += "}" + s := strings.Join([]string{`&SampleHistogram{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Sum:` + fmt.Sprintf("%v", this.Sum) + `,`, + `Buckets:` + repeatedStringForBuckets + `,`, + `}`, + }, "") + return s +} +func (this *HistogramBucket) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HistogramBucket{`, + `Boundaries:` + fmt.Sprintf("%v", this.Boundaries) + `,`, + `Lower:` + fmt.Sprintf("%v", this.Lower) + `,`, + `Upper:` + fmt.Sprintf("%v", this.Upper) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, `}`, }, "") return s @@ -1174,6 +1777,359 @@ func (m *SampleStream) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, SampleHistogramPair{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SampleHistogramPair) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SampleHistogramPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SampleHistogramPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SampleHistogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SampleHistogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SampleHistogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Buckets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Buckets = append(m.Buckets, &HistogramBucket{}) + if err := m.Buckets[len(m.Buckets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HistogramBucket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HistogramBucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HistogramBucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Boundaries", wireType) + } + m.Boundaries = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Boundaries |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Lower", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Lower = float64(math.Float64frombits(v)) + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Upper", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Upper = float64(math.Float64frombits(v)) + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = float64(math.Float64frombits(v)) default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) diff --git a/pkg/querier/tripperware/query.proto b/pkg/querier/tripperware/query.proto index 9d3afacf2c..9664fcf528 100644 --- a/pkg/querier/tripperware/query.proto +++ b/pkg/querier/tripperware/query.proto @@ -13,6 +13,25 @@ option (gogoproto.unmarshaler_all) = true; message SampleStream { repeated cortexpb.LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "metric", (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter"]; repeated cortexpb.Sample samples = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "values"]; + 
repeated SampleHistogramPair histograms = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "histograms"]; +} + +message SampleHistogramPair { + int64 timestamp_ms = 1; + SampleHistogram histogram = 2 [(gogoproto.nullable) = false]; +} + +message SampleHistogram { + double count = 1; + double sum = 2; + repeated HistogramBucket buckets = 3; +} + +message HistogramBucket { + int32 boundaries = 1; + double lower = 2; + double upper = 3; + double count = 4; } message PrometheusResponseStats { diff --git a/pkg/querier/tripperware/query_test.go b/pkg/querier/tripperware/query_test.go index 7ee3dcf869..08f149f43b 100644 --- a/pkg/querier/tripperware/query_test.go +++ b/pkg/querier/tripperware/query_test.go @@ -1,60 +1,195 @@ package tripperware import ( - stdjson "encoding/json" - "fmt" + "math" + "strconv" "testing" + "time" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/cortexpb" + "github.com/cortexproject/cortex/pkg/util" ) -func TestMarshalSampleStream(t *testing.T) { - t.Parallel() - for i, tc := range []struct { - sampleStream SampleStream +// Same as https://github.com/prometheus/client_golang/blob/v1.19.1/api/prometheus/v1/api_test.go#L1577. +func TestSampleHistogramPairJSONSerialization(t *testing.T) { + tests := []struct { + name string + point SampleHistogramPair + expected string }{ { - sampleStream: SampleStream{ - Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 1}}, + name: "empty histogram", + point: SampleHistogramPair{ + TimestampMs: 0, + Histogram: SampleHistogram{}, }, + expected: `[0,{"count":"0","sum":"0"}]`, }, { - sampleStream: SampleStream{ - Labels: cortexpb.FromLabelsToLabelAdapters(labels.FromMap(map[string]string{"foo": "bar"})), - Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 1}}, + name: "histogram with NaN/Inf and no buckets", + point: SampleHistogramPair{ + TimestampMs: 0, + Histogram: SampleHistogram{ + Count: math.NaN(), + Sum: math.Inf(1), + }, }, + expected: `[0,{"count":"NaN","sum":"+Inf"}]`, }, { - sampleStream: SampleStream{ - Labels: cortexpb.FromLabelsToLabelAdapters(labels.FromMap(map[string]string{"foo": "bar", "test": "test", "a": "b"})), - Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 1}, {Value: 2, TimestampMs: 2}, {Value: 3, TimestampMs: 3}}, + name: "six-bucket histogram", + point: SampleHistogramPair{ + TimestampMs: 1, + Histogram: testHistogram1, }, + expected: `[0.001,{"count":"13.5","sum":"3897.1","buckets":[[1,"-4870.992343051145","-4466.7196729968955","1"],[1,"-861.0779292198035","-789.6119426088657","2"],[1,"-558.3399591246119","-512","3"],[0,"2048","2233.3598364984477","1.5"],[0,"2896.3093757400984","3158.4477704354626","2.5"],[0,"4466.7196729968955","4870.992343051145","3.5"]]}]`, }, - } { - tc := tc - t.Run(fmt.Sprintf("test-case-%d", i), func(t *testing.T) { - t.Parallel() - out1, err := json.Marshal(tc.sampleStream) + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + b, err := json.Marshal(test.point) require.NoError(t, err) - out2, err := tc.sampleStream.MarshalJSON() + require.Equal(t, test.expected, string(b)) + + // To test Unmarshal we will Unmarshal then re-Marshal. This way we + // can do a string compare, otherwise NaN values don't show equivalence + // properly. 
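
Aside on the expected strings in this test: a SampleHistogramPair is encoded as a two-element JSON array, [timestamp in seconds, histogram object], with count, sum, and bucket values rendered as strings so NaN and ±Inf survive a round trip (plain JSON numbers cannot represent them). A minimal stdlib sketch of decoding that shape, assuming only the layout shown in the expected strings above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`[0,{"count":"NaN","sum":"+Inf"}]`)

	// The pair is a fixed two-element array: timestamp, then histogram object.
	var pair [2]json.RawMessage
	if err := json.Unmarshal(raw, &pair); err != nil {
		panic(err)
	}

	var ts float64 // seconds with millisecond precision
	_ = json.Unmarshal(pair[0], &ts)

	// Numeric fields arrive as strings so non-finite values are representable.
	var h struct {
		Count string `json:"count"`
		Sum   string `json:"sum"`
	}
	_ = json.Unmarshal(pair[1], &h)

	fmt.Printf("t=%vs count=%s sum=%s\n", ts, h.Count, h.Sum)
}
```
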
+ var sp SampleHistogramPair + err = json.Unmarshal(b, &sp) require.NoError(t, err) - require.Equal(t, out1, out2) + + b, err = json.Marshal(sp) + require.NoError(t, err) + require.Equal(t, test.expected, string(b)) }) } } -// MarshalJSON implements json.Marshaler. -func (s *SampleStream) MarshalJSON() ([]byte, error) { - stream := struct { - Metric model.Metric `json:"metric"` - Values []cortexpb.Sample `json:"values"` +// Same as https://github.com/prometheus/client_golang/blob/v1.19.1/api/prometheus/v1/api_test.go#L1682. +func TestSampleStreamJSONSerialization(t *testing.T) { + floats, histograms := generateData(1, 5) + + tests := []struct { + name string + stream SampleStream + expectedJSON string }{ - Metric: cortexpb.FromLabelAdaptersToMetric(s.Labels), - Values: s.Samples, + { + "floats", + *floats[0], + `{"metric":{"__name__":"timeseries_0","foo":"bar"},"values":[[1677587259.055,"1"],[1677587244.055,"2"],[1677587229.055,"3"],[1677587214.055,"4"],[1677587199.055,"5"]]}`, + }, + { + "histograms", + *histograms[0], + `{"metric":{"__name__":"timeseries_0","foo":"bar"},"histograms":[[1677587259.055,{"count":"13.5","sum":"0.1","buckets":[[1,"-4870.992343051145","-4466.7196729968955","1"],[1,"-861.0779292198035","-789.6119426088657","2"],[1,"-558.3399591246119","-512","3"],[0,"2048","2233.3598364984477","1.5"],[0,"2896.3093757400984","3158.4477704354626","2.5"],[0,"4466.7196729968955","4870.992343051145","3.5"]]}],[1677587244.055,{"count":"27","sum":"0.2","buckets":[[1,"-4870.992343051145","-4466.7196729968955","2"],[1,"-861.0779292198035","-789.6119426088657","4"],[1,"-558.3399591246119","-512","6"],[0,"2048","2233.3598364984477","3"],[0,"2896.3093757400984","3158.4477704354626","5"],[0,"4466.7196729968955","4870.992343051145","7"]]}],[1677587229.055,{"count":"40.5","sum":"0.30000000000000004","buckets":[[1,"-4870.992343051145","-4466.7196729968955","3"],[1,"-861.0779292198035","-789.6119426088657","6"],[1,"-558.3399591246119","-512","9"],[0,"2048","2233.3598364984477","4.5"],[0,"2896.3093757400984","3158.4477704354626","7.5"],[0,"4466.7196729968955","4870.992343051145","10.5"]]}],[1677587214.055,{"count":"54","sum":"0.4","buckets":[[1,"-4870.992343051145","-4466.7196729968955","4"],[1,"-861.0779292198035","-789.6119426088657","8"],[1,"-558.3399591246119","-512","12"],[0,"2048","2233.3598364984477","6"],[0,"2896.3093757400984","3158.4477704354626","10"],[0,"4466.7196729968955","4870.992343051145","14"]]}],[1677587199.055,{"count":"67.5","sum":"0.5","buckets":[[1,"-4870.992343051145","-4466.7196729968955","5"],[1,"-861.0779292198035","-789.6119426088657","10"],[1,"-558.3399591246119","-512","15"],[0,"2048","2233.3598364984477","7.5"],[0,"2896.3093757400984","3158.4477704354626","12.5"],[0,"4466.7196729968955","4870.992343051145","17.5"]]}]]}`, + }, + { + "both", + SampleStream{ + Labels: floats[0].Labels, + Samples: floats[0].Samples, + Histograms: histograms[0].Histograms, + }, + 
`{"metric":{"__name__":"timeseries_0","foo":"bar"},"values":[[1677587259.055,"1"],[1677587244.055,"2"],[1677587229.055,"3"],[1677587214.055,"4"],[1677587199.055,"5"]],"histograms":[[1677587259.055,{"count":"13.5","sum":"0.1","buckets":[[1,"-4870.992343051145","-4466.7196729968955","1"],[1,"-861.0779292198035","-789.6119426088657","2"],[1,"-558.3399591246119","-512","3"],[0,"2048","2233.3598364984477","1.5"],[0,"2896.3093757400984","3158.4477704354626","2.5"],[0,"4466.7196729968955","4870.992343051145","3.5"]]}],[1677587244.055,{"count":"27","sum":"0.2","buckets":[[1,"-4870.992343051145","-4466.7196729968955","2"],[1,"-861.0779292198035","-789.6119426088657","4"],[1,"-558.3399591246119","-512","6"],[0,"2048","2233.3598364984477","3"],[0,"2896.3093757400984","3158.4477704354626","5"],[0,"4466.7196729968955","4870.992343051145","7"]]}],[1677587229.055,{"count":"40.5","sum":"0.30000000000000004","buckets":[[1,"-4870.992343051145","-4466.7196729968955","3"],[1,"-861.0779292198035","-789.6119426088657","6"],[1,"-558.3399591246119","-512","9"],[0,"2048","2233.3598364984477","4.5"],[0,"2896.3093757400984","3158.4477704354626","7.5"],[0,"4466.7196729968955","4870.992343051145","10.5"]]}],[1677587214.055,{"count":"54","sum":"0.4","buckets":[[1,"-4870.992343051145","-4466.7196729968955","4"],[1,"-861.0779292198035","-789.6119426088657","8"],[1,"-558.3399591246119","-512","12"],[0,"2048","2233.3598364984477","6"],[0,"2896.3093757400984","3158.4477704354626","10"],[0,"4466.7196729968955","4870.992343051145","14"]]}],[1677587199.055,{"count":"67.5","sum":"0.5","buckets":[[1,"-4870.992343051145","-4466.7196729968955","5"],[1,"-861.0779292198035","-789.6119426088657","10"],[1,"-558.3399591246119","-512","15"],[0,"2048","2233.3598364984477","7.5"],[0,"2896.3093757400984","3158.4477704354626","12.5"],[0,"4466.7196729968955","4870.992343051145","17.5"]]}]]}`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + b, err := json.Marshal(test.stream) + require.NoError(t, err) + require.Equal(t, test.expectedJSON, string(b)) + + var stream SampleStream + err = json.Unmarshal(b, &stream) + require.NoError(t, err) + require.Equal(t, test.stream, stream) + }) + } +} + +func generateData(timeseries, datapoints int) (floatMatrix, histogramMatrix []*SampleStream) { + for i := 0; i < timeseries; i++ { + lset := labels.FromMap(map[string]string{ + model.MetricNameLabel: "timeseries_" + strconv.Itoa(i), + "foo": "bar", + }) + now := model.Time(1677587274055).Time() + floats := make([]cortexpb.Sample, datapoints) + histograms := make([]SampleHistogramPair, datapoints) + + for x := datapoints; x > 0; x-- { + f := float64(x) + floats[x-1] = cortexpb.Sample{ + // Set the time back assuming a 15s interval. Since this is used for + // Marshal/Unmarshal testing the actual interval doesn't matter. 
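
Quick check of the timestamps baked into the expected JSON above: generateData anchors at 1677587274055 ms and steps back 15s per datapoint, so the newest sample (x=1) lands at 1677587259.055 seconds, which matches the first element of each serialized pair. A standalone sketch of that arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.UnixMilli(1677587274055)
	for x := 1; x <= 5; x++ {
		// Each datapoint sits 15s earlier than the previous one.
		ts := now.Add(-15 * time.Second * time.Duration(x))
		fmt.Printf("x=%d -> %d ms (%.3f s)\n", x, ts.UnixMilli(), float64(ts.UnixMilli())/1000)
	}
}
```
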
+ TimestampMs: util.TimeToMillis(now.Add(time.Second * -15 * time.Duration(x))), + Value: f, + } + histograms[x-1] = SampleHistogramPair{ + TimestampMs: util.TimeToMillis(now.Add(time.Second * -15 * time.Duration(x))), + Histogram: SampleHistogram{ + Count: 13.5 * f, + Sum: .1 * f, + Buckets: []*HistogramBucket{ + { + Boundaries: 1, + Lower: -4870.992343051145, + Upper: -4466.7196729968955, + Count: 1 * f, + }, + { + Boundaries: 1, + Lower: -861.0779292198035, + Upper: -789.6119426088657, + Count: 2 * f, + }, + { + Boundaries: 1, + Lower: -558.3399591246119, + Upper: -512, + Count: 3 * f, + }, + { + Boundaries: 0, + Lower: 2048, + Upper: 2233.3598364984477, + Count: 1.5 * f, + }, + { + Boundaries: 0, + Lower: 2896.3093757400984, + Upper: 3158.4477704354626, + Count: 2.5 * f, + }, + { + Boundaries: 0, + Lower: 4466.7196729968955, + Upper: 4870.992343051145, + Count: 3.5 * f, + }, + }, + }, + } + } + + fss := &SampleStream{ + Labels: cortexpb.FromLabelsToLabelAdapters(lset), + Samples: floats, + } + hss := &SampleStream{ + Labels: cortexpb.FromLabelsToLabelAdapters(lset), + Histograms: histograms, + } + + floatMatrix = append(floatMatrix, fss) + histogramMatrix = append(histogramMatrix, hss) } - return stdjson.Marshal(stream) + return } diff --git a/pkg/querier/worker/frontend_processor_test.go b/pkg/querier/worker/frontend_processor_test.go index 65a68ea73f..3b10cc29a6 100644 --- a/pkg/querier/worker/frontend_processor_test.go +++ b/pkg/querier/worker/frontend_processor_test.go @@ -21,7 +21,7 @@ func TestRecvFailDoesntCancelProcess(t *testing.T) { defer cancel() // We use random port here, hopefully without any gRPC server. - cc, err := grpc.DialContext(ctx, "localhost:999", grpc.WithTransportCredentials(insecure.NewCredentials())) + cc, err := grpc.NewClient("localhost:999", grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) cfg := Config{} @@ -55,7 +55,7 @@ func TestContextCancelStopsProcess(t *testing.T) { defer cancel() // We use random port here, hopefully without any gRPC server. - cc, err := grpc.DialContext(ctx, "localhost:999", grpc.WithTransportCredentials(insecure.NewCredentials())) + cc, err := grpc.NewClient("localhost:999", grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) pm := newProcessorManager(ctx, &mockProcessor{}, cc, "test") diff --git a/pkg/querier/worker/scheduler_processor.go b/pkg/querier/worker/scheduler_processor.go index 7ee7419064..04d71cc69c 100644 --- a/pkg/querier/worker/scheduler_processor.go +++ b/pkg/querier/worker/scheduler_processor.go @@ -231,7 +231,7 @@ func (sp *schedulerProcessor) createFrontendClient(addr string) (client.PoolClie return nil, err } - conn, err := grpc.Dial(addr, opts...) + conn, err := grpc.NewClient(addr, opts...) 
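
A recurring change in this diff replaces the deprecated grpc.Dial/grpc.DialContext with grpc.NewClient. NewClient performs no network I/O: it validates the target and options and returns a channel in IDLE state, which is why the context argument disappears from these call sites. Callers that relied on eager-dial semantics must drive connectivity themselves; a sketch of doing so with the new API (address is illustrative):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// NewClient only validates configuration; nothing is dialed here.
	cc, err := grpc.NewClient("localhost:9095", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	cc.Connect() // kick the channel out of IDLE, like the old eager dial
	for state := cc.GetState(); state != connectivity.Ready; state = cc.GetState() {
		if !cc.WaitForStateChange(ctx, state) {
			log.Fatal("timed out waiting for READY")
		}
	}
	log.Println("connected")
}
```
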
if err != nil { return nil, err } diff --git a/pkg/querier/worker/worker.go b/pkg/querier/worker/worker.go index b93ff746db..3fc87ee4c6 100644 --- a/pkg/querier/worker/worker.go +++ b/pkg/querier/worker/worker.go @@ -193,7 +193,7 @@ func (w *querierWorker) AddressAdded(address string) { } level.Info(w.log).Log("msg", "adding connection", "addr", address) - conn, err := w.connect(context.Background(), address) + conn, err := w.connect(address) if err != nil { level.Error(w.log).Log("msg", "error connecting", "addr", address, "err", err) return @@ -259,14 +259,14 @@ func (w *querierWorker) resetConcurrency() { } } -func (w *querierWorker) connect(ctx context.Context, address string) (*grpc.ClientConn, error) { +func (w *querierWorker) connect(address string) (*grpc.ClientConn, error) { // Because we only use single long-running method, it doesn't make sense to inject user ID, send over tracing or add metrics. opts, err := w.cfg.GRPCClientConfig.DialOption(nil, nil) if err != nil { return nil, err } - conn, err := grpc.DialContext(ctx, address, opts...) + conn, err := grpc.NewClient(address, opts...) if err != nil { return nil, err } diff --git a/pkg/ring/kv/memberlist/memberlist_client_test.go b/pkg/ring/kv/memberlist/memberlist_client_test.go index e4feb6ce3f..d94e88e5f5 100644 --- a/pkg/ring/kv/memberlist/memberlist_client_test.go +++ b/pkg/ring/kv/memberlist/memberlist_client_test.go @@ -827,7 +827,7 @@ func TestMemberlistJoinOnStarting(t *testing.T) { return mkv2.memberlist.NumMembers() } - poll(t, 5*time.Second, 2, membersFunc) + poll(t, 1*time.Minute, 2, membersFunc) } func getFreePorts(count int) ([]int, error) { @@ -1103,20 +1103,20 @@ func TestRejoin(t *testing.T) { return mkv2.memberlist.NumMembers() } - poll(t, 5*time.Second, 2, membersFunc) + poll(t, 1*time.Minute, 2, membersFunc) // Shutdown first KV require.NoError(t, services.StopAndAwaitTerminated(context.Background(), mkv1)) // Second KV should see single member now. - poll(t, 5*time.Second, 1, membersFunc) + poll(t, 1*time.Minute, 1, membersFunc) // Let's start first KV again. It is not configured to join the cluster, but KV2 is rejoining. 
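
The memberlist test changes above raise poll timeouts from 5s to 1m to deflake slow CI runs; the retry cadence is unchanged, only the deadline grows. For context, the poll helpers these tests rely on retry a predicate until it matches or the deadline passes. A minimal stdlib sketch of that pattern (names and the sleep fraction are illustrative assumptions):

```go
package main

import (
	"fmt"
	"time"
)

// poll retries have() until it returns want or timeout elapses.
func poll(timeout time.Duration, want int, have func() int) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if have() == want {
			return true
		}
		time.Sleep(timeout / 100) // back off briefly between checks
	}
	return have() == want
}

func main() {
	start := time.Now()
	ok := poll(time.Second, 2, func() int {
		if time.Since(start) > 100*time.Millisecond {
			return 2 // condition becomes true after 100ms
		}
		return 1
	})
	fmt.Println("reached expected value:", ok)
}
```
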
mkv1 = NewKV(cfg1, log.NewNopLogger(), &dnsProviderMock{}, prometheus.NewPedanticRegistry()) require.NoError(t, services.StartAndAwaitRunning(context.Background(), mkv1)) defer services.StopAndAwaitTerminated(context.Background(), mkv1) //nolint:errcheck - poll(t, 5*time.Second, 2, membersFunc) + poll(t, 1*time.Minute, 2, membersFunc) } func TestMessageBuffer(t *testing.T) { diff --git a/pkg/ring/lifecycler_test.go b/pkg/ring/lifecycler_test.go index 62b5853d66..51d07028b7 100644 --- a/pkg/ring/lifecycler_test.go +++ b/pkg/ring/lifecycler_test.go @@ -494,17 +494,17 @@ func TestCheckReady_CheckRingHealth(t *testing.T) { }{ "should wait until the self instance is ACTIVE and healthy in the ring when 'check ring health' is disabled": { checkRingHealthEnabled: false, - firstJoinAfter: time.Second, - secondJoinAfter: 3 * time.Second, - expectedFirstMinReady: time.Second, - expectedFirstMaxReady: 2 * time.Second, + firstJoinAfter: 1000 * time.Millisecond, + secondJoinAfter: 3000 * time.Millisecond, + expectedFirstMinReady: 900 * time.Millisecond, + expectedFirstMaxReady: 1900 * time.Millisecond, }, "should wait until all instances are ACTIVE and healthy in the ring when 'check ring health' is enabled": { checkRingHealthEnabled: true, - firstJoinAfter: time.Second, - secondJoinAfter: 3 * time.Second, - expectedFirstMinReady: 3 * time.Second, - expectedFirstMaxReady: 4 * time.Second, + firstJoinAfter: 1000 * time.Millisecond, + secondJoinAfter: 3000 * time.Millisecond, + expectedFirstMinReady: 2900 * time.Millisecond, + expectedFirstMaxReady: 3900 * time.Millisecond, }, } diff --git a/pkg/ruler/client_pool.go b/pkg/ruler/client_pool.go index 238adfdb77..0bb5ff05f1 100644 --- a/pkg/ruler/client_pool.go +++ b/pkg/ruler/client_pool.go @@ -70,7 +70,7 @@ func dialRulerClient(clientCfg grpcclient.Config, addr string, requestDuration * return nil, err } - conn, err := grpc.Dial(addr, opts...) + conn, err := grpc.NewClient(addr, opts...) 
if err != nil { return nil, errors.Wrapf(err, "failed to dial ruler %s", addr) } diff --git a/pkg/ruler/manager.go b/pkg/ruler/manager.go index eb0e5ce9e4..9afbf94fa0 100644 --- a/pkg/ruler/manager.go +++ b/pkg/ruler/manager.go @@ -3,6 +3,7 @@ package ruler import ( "context" "fmt" + "io" "net/http" "os" "sync" @@ -293,6 +294,8 @@ func (r *DefaultMultiTenantManager) getOrCreateNotifier(userID string, userManag return n.notifier, nil } + logger := log.With(r.logger, "user", userID) + n = newRulerNotifier(¬ifier.Options{ QueueCapacity: r.cfg.NotificationQueueCapacity, Registerer: userManagerRegistry, @@ -309,9 +312,27 @@ func (r *DefaultMultiTenantManager) getOrCreateNotifier(userID string, userManag defer sp.Finish() ctx = ot.ContextWithSpan(ctx, sp) _ = ot.GlobalTracer().Inject(sp.Context(), ot.HTTPHeaders, ot.HTTPHeadersCarrier(req.Header)) - return ctxhttp.Do(ctx, client, req) + resp, err := ctxhttp.Do(ctx, client, req) + if err != nil { + level.Error(logger).Log("msg", "error occurred while sending alerts", "error", err) + return resp, err + } + defer resp.Body.Close() + if resp.StatusCode/100 != 2 { + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + level.Error(logger).Log("msg", "error reading response body", "error", err, "response code", resp.StatusCode) + return resp, err + } + customErrorMessage := string(bodyBytes) + if len(customErrorMessage) >= 150 { + customErrorMessage = customErrorMessage[:150] + } + level.Error(logger).Log("msg", "error occurred sending notification", "error", customErrorMessage, "response code", resp.StatusCode) + } + return resp, err }, - }, log.With(r.logger, "user", userID), userManagerRegistry, r.notifiersDiscoveryMetrics) + }, logger, userManagerRegistry, r.notifiersDiscoveryMetrics) n.run() diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index fda8fb5d5a..f04d02d6d4 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -2041,7 +2041,7 @@ func TestRecoverAlertsPostOutage(t *testing.T) { {Name: labels.MetricName, Value: "ALERTS_FOR_STATE"}, {Name: labels.AlertName, Value: mockRules["user1"][0].GetRules()[0].Alert}, }, - Chunks: querier.ConvertToChunks(t, []cortexpb.Sample{{TimestampMs: downAtTimeMs, Value: float64(downAtActiveSec)}}), + Chunks: querier.ConvertToChunks(t, []cortexpb.Sample{{TimestampMs: downAtTimeMs, Value: float64(downAtActiveSec)}}, nil), }, }, }, nil) diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 128a9c0631..b3088ccddc 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -465,7 +465,7 @@ func (s *Scheduler) forwardErrorToFrontend(ctx context.Context, req *schedulerRe return } - conn, err := grpc.DialContext(ctx, req.frontendAddress, opts...) + conn, err := grpc.NewClient(req.frontendAddress, opts...) 
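
The S3 bucket client change just below adds an early-out to BucketWithRetries: once the caller's context is canceled or past its deadline, further attempts cannot succeed, so the loop returns the last error immediately. This is also why the new test cases expect retryCount: 1, meaning a single attempt and zero retries. A stdlib sketch of the guard:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func retry(ctx context.Context, attempts int, f func() error) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		lastErr = f()
		if lastErr == nil {
			return nil
		}
		// No point retrying once the context is already done.
		if errors.Is(lastErr, context.Canceled) || errors.Is(lastErr, context.DeadlineExceeded) {
			return lastErr
		}
	}
	return lastErr
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	calls := 0
	err := retry(ctx, 5, func() error {
		calls++
		<-ctx.Done()
		return ctx.Err()
	})
	fmt.Printf("calls=%d err=%v\n", calls, err) // calls=1, not 5
}
```
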
if err != nil { level.Warn(s.log).Log("msg", "failed to create gRPC connection to frontend to report error", "frontend", req.frontendAddress, "err", err, "requestErr", requestErr) return diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 798843284e..cc16448358 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -60,7 +60,7 @@ func setupScheduler(t *testing.T, reg prometheus.Registerer) (*Scheduler, schedu _ = l.Close() }) - c, err := grpc.Dial(l.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + c, err := grpc.NewClient(l.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) t.Cleanup(func() { diff --git a/pkg/storage/bucket/s3/bucket_client.go b/pkg/storage/bucket/s3/bucket_client.go index f889c12407..7a72e65133 100644 --- a/pkg/storage/bucket/s3/bucket_client.go +++ b/pkg/storage/bucket/s3/bucket_client.go @@ -8,6 +8,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/thanos-io/objstore" "github.com/thanos-io/objstore/providers/s3" @@ -127,6 +128,10 @@ func (b *BucketWithRetries) retry(ctx context.Context, f func() error, operation if lastErr == nil { return nil } + // No need to retry when context was already canceled. + if errors.Is(lastErr, context.Canceled) || errors.Is(lastErr, context.DeadlineExceeded) { + return lastErr + } if b.bucket.IsObjNotFoundErr(lastErr) || b.bucket.IsAccessDeniedErr(lastErr) { return lastErr } diff --git a/pkg/storage/bucket/s3/bucket_client_test.go b/pkg/storage/bucket/s3/bucket_client_test.go index 2de42c8857..2454970517 100644 --- a/pkg/storage/bucket/s3/bucket_client_test.go +++ b/pkg/storage/bucket/s3/bucket_client_test.go @@ -34,6 +34,14 @@ func TestBucketWithRetries_ShouldRetry(t *testing.T) { err: errKeyDenied, retryCount: 1, }, + "should not retry when context canceled": { + err: context.Canceled, + retryCount: 1, + }, + "should not retry when context deadline exceeded": { + err: context.DeadlineExceeded, + retryCount: 1, + }, } for name, tc := range cases { diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index 644b855c97..5c9f0c23cb 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -159,6 +159,9 @@ type TSDBConfig struct { // OutOfOrderCapMax is maximum capacity for OOO chunks (in samples). OutOfOrderCapMax int64 `yaml:"out_of_order_cap_max"` + + // Enable native histogram ingestion. + EnableNativeHistograms bool `yaml:"enable_native_histograms"` } // RegisterFlags registers the TSDBConfig flags. @@ -186,6 +189,7 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.MaxExemplars, "blocks-storage.tsdb.max-exemplars", 0, "Deprecated, use maxExemplars in limits instead. If the MaxExemplars value in limits is set to zero, cortex will fallback on this value. This setting enables support for exemplars in TSDB and sets the maximum number that will be stored. 
0 or less means disabled.") f.BoolVar(&cfg.MemorySnapshotOnShutdown, "blocks-storage.tsdb.memory-snapshot-on-shutdown", false, "True to enable snapshotting of in-memory TSDB data on disk when shutting down.") f.Int64Var(&cfg.OutOfOrderCapMax, "blocks-storage.tsdb.out-of-order-cap-max", tsdb.DefaultOutOfOrderCapMax, "[EXPERIMENTAL] Configures the maximum number of samples per chunk that can be out-of-order.") + f.BoolVar(&cfg.EnableNativeHistograms, "blocks-storage.tsdb.enable-native-histograms", false, "[EXPERIMENTAL] True to enable native histogram.") } // Validate the config. diff --git a/pkg/storage/tsdb/inmemory_index_cache.go b/pkg/storage/tsdb/inmemory_index_cache.go index e2e481182f..1530bf99f8 100644 --- a/pkg/storage/tsdb/inmemory_index_cache.go +++ b/pkg/storage/tsdb/inmemory_index_cache.go @@ -55,25 +55,25 @@ func NewInMemoryIndexCacheWithConfig(logger log.Logger, commonMetrics *storecach Name: "thanos_store_index_cache_items_added_total", Help: "Total number of items that were added to the index cache.", }, []string{"item_type"}) - c.added.WithLabelValues(cacheTypePostings) - c.added.WithLabelValues(cacheTypeSeries) - c.added.WithLabelValues(cacheTypeExpandedPostings) + c.added.WithLabelValues(storecache.CacheTypePostings) + c.added.WithLabelValues(storecache.CacheTypeSeries) + c.added.WithLabelValues(storecache.CacheTypeExpandedPostings) - c.commonMetrics.RequestTotal.WithLabelValues(cacheTypePostings, tenancy.DefaultTenant) - c.commonMetrics.RequestTotal.WithLabelValues(cacheTypeSeries, tenancy.DefaultTenant) - c.commonMetrics.RequestTotal.WithLabelValues(cacheTypeExpandedPostings, tenancy.DefaultTenant) + c.commonMetrics.RequestTotal.WithLabelValues(storecache.CacheTypePostings, tenancy.DefaultTenant) + c.commonMetrics.RequestTotal.WithLabelValues(storecache.CacheTypeSeries, tenancy.DefaultTenant) + c.commonMetrics.RequestTotal.WithLabelValues(storecache.CacheTypeExpandedPostings, tenancy.DefaultTenant) c.overflow = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_index_cache_items_overflowed_total", Help: "Total number of items that could not be added to the cache due to being too big.", }, []string{"item_type"}) - c.overflow.WithLabelValues(cacheTypePostings) - c.overflow.WithLabelValues(cacheTypeSeries) - c.overflow.WithLabelValues(cacheTypeExpandedPostings) + c.overflow.WithLabelValues(storecache.CacheTypePostings) + c.overflow.WithLabelValues(storecache.CacheTypeSeries) + c.overflow.WithLabelValues(storecache.CacheTypeExpandedPostings) - c.commonMetrics.HitsTotal.WithLabelValues(cacheTypePostings, tenancy.DefaultTenant) - c.commonMetrics.HitsTotal.WithLabelValues(cacheTypeSeries, tenancy.DefaultTenant) - c.commonMetrics.HitsTotal.WithLabelValues(cacheTypeExpandedPostings, tenancy.DefaultTenant) + c.commonMetrics.HitsTotal.WithLabelValues(storecache.CacheTypePostings, tenancy.DefaultTenant) + c.commonMetrics.HitsTotal.WithLabelValues(storecache.CacheTypeSeries, tenancy.DefaultTenant) + c.commonMetrics.HitsTotal.WithLabelValues(storecache.CacheTypeExpandedPostings, tenancy.DefaultTenant) c.cache = fastcache.New(int(config.MaxSize)) level.Info(logger).Log( @@ -132,14 +132,14 @@ func copyToKey(l labels.Label) storecache.CacheKeyPostings { // StorePostings sets the postings identified by the ulid and label to the value v, // if the postings already exists in the cache it is not mutated. 
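
In the InMemoryIndexCache constructor above, WithLabelValues is called once per cache type right after each metric is created. That is the standard Prometheus pattern for pre-registering label combinations so every series is exported as 0 from startup instead of appearing only after its first increment. A minimal sketch of the idiom (metric name illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	added := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_items_added_total",
		Help: "Items added to the cache, by type.",
	}, []string{"item_type"})

	// Pre-create each label combination; the counters start visible at 0.
	for _, t := range []string{"Postings", "Series", "ExpandedPostings"} {
		added.WithLabelValues(t)
	}
	fmt.Println("label combinations pre-registered")
}
```
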
func (c *InMemoryIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) { - c.commonMetrics.DataSizeBytes.WithLabelValues(cacheTypePostings, tenant).Observe(float64(len(v))) - c.set(cacheTypePostings, storecache.CacheKey{Block: blockID.String(), Key: copyToKey(l)}, v) + c.commonMetrics.DataSizeBytes.WithLabelValues(storecache.CacheTypePostings, tenant).Observe(float64(len(v))) + c.set(storecache.CacheTypePostings, storecache.CacheKey{Block: blockID.String(), Key: copyToKey(l)}, v) } // FetchMultiPostings fetches multiple postings - each identified by a label - // and returns a map containing cache hits, along with a list of missing keys. func (c *InMemoryIndexCache) FetchMultiPostings(ctx context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) { - timer := prometheus.NewTimer(c.commonMetrics.FetchLatency.WithLabelValues(cacheTypePostings, tenant)) + timer := prometheus.NewTimer(c.commonMetrics.FetchLatency.WithLabelValues(storecache.CacheTypePostings, tenant)) defer timer.ObserveDuration() hits = map[labels.Label][]byte{} @@ -149,8 +149,8 @@ func (c *InMemoryIndexCache) FetchMultiPostings(ctx context.Context, blockID uli hit := 0 for _, key := range keys { if ctx.Err() != nil { - c.commonMetrics.RequestTotal.WithLabelValues(cacheTypePostings, tenant).Add(float64(requests)) - c.commonMetrics.HitsTotal.WithLabelValues(cacheTypePostings, tenant).Add(float64(hit)) + c.commonMetrics.RequestTotal.WithLabelValues(storecache.CacheTypePostings, tenant).Add(float64(requests)) + c.commonMetrics.HitsTotal.WithLabelValues(storecache.CacheTypePostings, tenant).Add(float64(hit)) return hits, misses } requests++ @@ -162,29 +162,29 @@ func (c *InMemoryIndexCache) FetchMultiPostings(ctx context.Context, blockID uli misses = append(misses, key) } - c.commonMetrics.RequestTotal.WithLabelValues(cacheTypePostings, tenant).Add(float64(requests)) - c.commonMetrics.HitsTotal.WithLabelValues(cacheTypePostings, tenant).Add(float64(hit)) + c.commonMetrics.RequestTotal.WithLabelValues(storecache.CacheTypePostings, tenant).Add(float64(requests)) + c.commonMetrics.HitsTotal.WithLabelValues(storecache.CacheTypePostings, tenant).Add(float64(hit)) return hits, misses } // StoreExpandedPostings stores expanded postings for a set of label matchers. func (c *InMemoryIndexCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) { - c.commonMetrics.DataSizeBytes.WithLabelValues(cacheTypeExpandedPostings, tenant).Observe(float64(len(v))) - c.set(cacheTypeExpandedPostings, storecache.CacheKey{Block: blockID.String(), Key: storecache.CacheKeyExpandedPostings(storecache.LabelMatchersToString(matchers))}, v) + c.commonMetrics.DataSizeBytes.WithLabelValues(storecache.CacheTypeExpandedPostings, tenant).Observe(float64(len(v))) + c.set(storecache.CacheTypeExpandedPostings, storecache.CacheKey{Block: blockID.String(), Key: storecache.CacheKeyExpandedPostings(storecache.LabelMatchersToString(matchers))}, v) } // FetchExpandedPostings fetches expanded postings and returns cached data and a boolean value representing whether it is a cache hit or not. 
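
The fetch paths above wrap each operation in prometheus.NewTimer with the appropriate (item_type, tenant) labels and a deferred ObserveDuration, so elapsed seconds are recorded into the histogram even on the early-return branches. A self-contained sketch of that idiom (metric name illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fetchLatency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "example_fetch_duration_seconds",
		Help:    "Latency to fetch items, by type.",
		Buckets: prometheus.DefBuckets,
	}, []string{"item_type"})

	// NewTimer captures the start; ObserveDuration records the elapsed time.
	timer := prometheus.NewTimer(fetchLatency.WithLabelValues("Postings"))
	time.Sleep(10 * time.Millisecond) // simulated fetch
	d := timer.ObserveDuration()
	fmt.Println("observed:", d)
}
```
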
func (c *InMemoryIndexCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, matchers []*labels.Matcher, tenant string) ([]byte, bool) { - timer := prometheus.NewTimer(c.commonMetrics.FetchLatency.WithLabelValues(cacheTypeExpandedPostings, tenant)) + timer := prometheus.NewTimer(c.commonMetrics.FetchLatency.WithLabelValues(storecache.CacheTypeExpandedPostings, tenant)) defer timer.ObserveDuration() if ctx.Err() != nil { return nil, false } - c.commonMetrics.RequestTotal.WithLabelValues(cacheTypeExpandedPostings, tenant).Inc() + c.commonMetrics.RequestTotal.WithLabelValues(storecache.CacheTypeExpandedPostings, tenant).Inc() if b, ok := c.get(storecache.CacheKey{Block: blockID.String(), Key: storecache.CacheKeyExpandedPostings(storecache.LabelMatchersToString(matchers))}); ok { - c.commonMetrics.HitsTotal.WithLabelValues(cacheTypeExpandedPostings, tenant).Inc() + c.commonMetrics.HitsTotal.WithLabelValues(storecache.CacheTypeExpandedPostings, tenant).Inc() return b, true } return nil, false @@ -193,14 +193,14 @@ func (c *InMemoryIndexCache) FetchExpandedPostings(ctx context.Context, blockID // StoreSeries sets the series identified by the ulid and id to the value v, // if the series already exists in the cache it is not mutated. func (c *InMemoryIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { - c.commonMetrics.DataSizeBytes.WithLabelValues(cacheTypeSeries, tenant).Observe(float64(len(v))) - c.set(cacheTypeSeries, storecache.CacheKey{Block: blockID.String(), Key: storecache.CacheKeySeries(id)}, v) + c.commonMetrics.DataSizeBytes.WithLabelValues(storecache.CacheTypeSeries, tenant).Observe(float64(len(v))) + c.set(storecache.CacheTypeSeries, storecache.CacheKey{Block: blockID.String(), Key: storecache.CacheKeySeries(id)}, v) } // FetchMultiSeries fetches multiple series - each identified by ID - from the cache // and returns a map containing cache hits, along with a list of missing IDs. func (c *InMemoryIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { - timer := prometheus.NewTimer(c.commonMetrics.FetchLatency.WithLabelValues(cacheTypeSeries, tenant)) + timer := prometheus.NewTimer(c.commonMetrics.FetchLatency.WithLabelValues(storecache.CacheTypeSeries, tenant)) defer timer.ObserveDuration() hits = map[storage.SeriesRef][]byte{} @@ -210,8 +210,8 @@ func (c *InMemoryIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid. hit := 0 for _, id := range ids { if ctx.Err() != nil { - c.commonMetrics.RequestTotal.WithLabelValues(cacheTypeSeries, tenant).Add(float64(requests)) - c.commonMetrics.HitsTotal.WithLabelValues(cacheTypeSeries, tenant).Add(float64(hit)) + c.commonMetrics.RequestTotal.WithLabelValues(storecache.CacheTypeSeries, tenant).Add(float64(requests)) + c.commonMetrics.HitsTotal.WithLabelValues(storecache.CacheTypeSeries, tenant).Add(float64(hit)) return hits, misses } requests++ @@ -223,8 +223,8 @@ func (c *InMemoryIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid. 
misses = append(misses, id) } - c.commonMetrics.RequestTotal.WithLabelValues(cacheTypeSeries, tenant).Add(float64(requests)) - c.commonMetrics.HitsTotal.WithLabelValues(cacheTypeSeries, tenant).Add(float64(hit)) + c.commonMetrics.RequestTotal.WithLabelValues(storecache.CacheTypeSeries, tenant).Add(float64(requests)) + c.commonMetrics.HitsTotal.WithLabelValues(storecache.CacheTypeSeries, tenant).Add(float64(hit)) return hits, misses } diff --git a/pkg/storage/tsdb/inmemory_index_cache_test.go b/pkg/storage/tsdb/inmemory_index_cache_test.go index 530d26575c..aca56b5cab 100644 --- a/pkg/storage/tsdb/inmemory_index_cache_test.go +++ b/pkg/storage/tsdb/inmemory_index_cache_test.go @@ -58,7 +58,7 @@ func TestInMemoryIndexCache_UpdateItem(t *testing.T) { get func(storage.SeriesRef) ([]byte, bool) }{ { - typ: cacheTypePostings, + typ: storecache.CacheTypePostings, set: func(id storage.SeriesRef, b []byte) { cache.StorePostings(uid(id), lbl, b, tenancy.DefaultTenant) }, get: func(id storage.SeriesRef) ([]byte, bool) { hits, _ := cache.FetchMultiPostings(ctx, uid(id), []labels.Label{lbl}, tenancy.DefaultTenant) @@ -68,7 +68,7 @@ func TestInMemoryIndexCache_UpdateItem(t *testing.T) { }, }, { - typ: cacheTypeSeries, + typ: storecache.CacheTypeSeries, set: func(id storage.SeriesRef, b []byte) { cache.StoreSeries(uid(id), id, b, tenancy.DefaultTenant) }, get: func(id storage.SeriesRef) ([]byte, bool) { hits, _ := cache.FetchMultiSeries(ctx, uid(id), []storage.SeriesRef{id}, tenancy.DefaultTenant) @@ -78,7 +78,7 @@ func TestInMemoryIndexCache_UpdateItem(t *testing.T) { }, }, { - typ: cacheTypeExpandedPostings, + typ: storecache.CacheTypeExpandedPostings, set: func(id storage.SeriesRef, b []byte) { cache.StoreExpandedPostings(uid(id), []*labels.Matcher{matcher}, b, tenancy.DefaultTenant) }, @@ -128,7 +128,7 @@ func TestInMemoryIndexCacheSetOverflow(t *testing.T) { } cache, err := NewInMemoryIndexCacheWithConfig(log.NewNopLogger(), nil, nil, config) testutil.Ok(t, err) - counter := cache.overflow.WithLabelValues(cacheTypeSeries) + counter := cache.overflow.WithLabelValues(storecache.CacheTypeSeries) id := ulid.MustNew(ulid.Now(), nil) // Insert a small value won't trigger item overflow. 
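
In the multilevel_cache.go diff further below, the Store* methods stop spawning one blocking goroutine per cache level behind a WaitGroup and instead enqueue writes on the shared bounded async processor, counting drops in the new cortex_store_multilevel_index_cache_store_dropped_items_total metric when the buffer is full. The real code uses Thanos cacheutil.AsyncOperationProcessor; here is a minimal stdlib model of the enqueue-or-drop behavior:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errBufferFull = errors.New("async buffer full")

type asyncProcessor struct {
	jobs chan func()
	wg   sync.WaitGroup
}

func newAsyncProcessor(bufferSize, workers int) *asyncProcessor {
	p := &asyncProcessor{jobs: make(chan func(), bufferSize)}
	p.wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer p.wg.Done()
			for job := range p.jobs {
				job()
			}
		}()
	}
	return p
}

// EnqueueAsync never blocks: if the buffer is full, the write is dropped and
// the caller bumps a "dropped items" counter instead.
func (p *asyncProcessor) EnqueueAsync(job func()) error {
	select {
	case p.jobs <- job:
		return nil
	default:
		return errBufferFull
	}
}

func main() {
	p := newAsyncProcessor(1, 1)
	dropped := 0
	for i := 0; i < 100; i++ {
		if err := p.EnqueueAsync(func() {}); errors.Is(err, errBufferFull) {
			dropped++
		}
	}
	close(p.jobs)
	p.wg.Wait()
	fmt.Println("dropped:", dropped)
}
```
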
cache.StoreSeries(id, 1, []byte("0"), tenancy.DefaultTenant) diff --git a/pkg/storage/tsdb/multilevel_cache.go b/pkg/storage/tsdb/multilevel_cache.go index 7e7a35eadc..5d41ac118f 100644 --- a/pkg/storage/tsdb/multilevel_cache.go +++ b/pkg/storage/tsdb/multilevel_cache.go @@ -3,7 +3,6 @@ package tsdb import ( "context" "errors" - "sync" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" @@ -15,40 +14,31 @@ import ( "golang.org/x/exp/slices" ) -const ( - cacheTypePostings string = "Postings" - cacheTypeExpandedPostings string = "ExpandedPostings" - cacheTypeSeries string = "Series" -) - type multiLevelCache struct { postingsCaches, seriesCaches, expandedPostingCaches []storecache.IndexCache - fetchLatency *prometheus.HistogramVec - backFillLatency *prometheus.HistogramVec - backfillProcessor *cacheutil.AsyncOperationProcessor - backfillDroppedPostings prometheus.Counter - backfillDroppedSeries prometheus.Counter - backfillDroppedExpandedPostings prometheus.Counter + fetchLatency *prometheus.HistogramVec + backFillLatency *prometheus.HistogramVec + backfillProcessor *cacheutil.AsyncOperationProcessor + backfillDroppedItems map[string]prometheus.Counter + storeDroppedItems map[string]prometheus.Counter maxBackfillItems int } func (m *multiLevelCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) { - wg := sync.WaitGroup{} - wg.Add(len(m.postingsCaches)) for _, c := range m.postingsCaches { cache := c - go func() { - defer wg.Done() + if err := m.backfillProcessor.EnqueueAsync(func() { cache.StorePostings(blockID, l, v, tenant) - }() + }); errors.Is(err, cacheutil.ErrAsyncBufferFull) { + m.storeDroppedItems[storecache.CacheTypePostings].Inc() + } } - wg.Wait() } func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) { - timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(cacheTypePostings)) + timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(storecache.CacheTypePostings)) defer timer.ObserveDuration() misses = keys @@ -78,7 +68,7 @@ func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.U } defer func() { - backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(cacheTypePostings)) + backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypePostings)) defer backFillTimer.ObserveDuration() for i, values := range backfillItems { i := i @@ -92,12 +82,12 @@ func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.U m.postingsCaches[i].StorePostings(blockID, lbl, b, tenant) cnt++ if cnt == m.maxBackfillItems { - m.backfillDroppedPostings.Add(float64(len(values) - cnt)) + m.backfillDroppedItems[storecache.CacheTypePostings].Add(float64(len(values) - cnt)) return } } }); errors.Is(err, cacheutil.ErrAsyncBufferFull) { - m.backfillDroppedPostings.Add(float64(len(values))) + m.backfillDroppedItems[storecache.CacheTypePostings].Add(float64(len(values))) } } }() @@ -106,20 +96,18 @@ func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.U } func (m *multiLevelCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) { - wg := sync.WaitGroup{} - wg.Add(len(m.expandedPostingCaches)) for _, c := range m.expandedPostingCaches { cache := c - go func() { - defer wg.Done() + if err := m.backfillProcessor.EnqueueAsync(func() { cache.StoreExpandedPostings(blockID, 
matchers, v, tenant) - }() + }); errors.Is(err, cacheutil.ErrAsyncBufferFull) { + m.storeDroppedItems[storecache.CacheTypeExpandedPostings].Inc() + } } - wg.Wait() } func (m *multiLevelCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, matchers []*labels.Matcher, tenant string) ([]byte, bool) { - timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(cacheTypeExpandedPostings)) + timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(storecache.CacheTypeExpandedPostings)) defer timer.ObserveDuration() for i, c := range m.expandedPostingCaches { @@ -128,11 +116,11 @@ func (m *multiLevelCache) FetchExpandedPostings(ctx context.Context, blockID uli } if d, h := c.FetchExpandedPostings(ctx, blockID, matchers, tenant); h { if i > 0 { - backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(cacheTypeExpandedPostings)) + backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypeExpandedPostings)) if err := m.backfillProcessor.EnqueueAsync(func() { m.expandedPostingCaches[i-1].StoreExpandedPostings(blockID, matchers, d, tenant) }); errors.Is(err, cacheutil.ErrAsyncBufferFull) { - m.backfillDroppedExpandedPostings.Inc() + m.backfillDroppedItems[storecache.CacheTypeExpandedPostings].Inc() } backFillTimer.ObserveDuration() } @@ -144,20 +132,18 @@ func (m *multiLevelCache) FetchExpandedPostings(ctx context.Context, blockID uli } func (m *multiLevelCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { - wg := sync.WaitGroup{} - wg.Add(len(m.seriesCaches)) for _, c := range m.seriesCaches { cache := c - go func() { - defer wg.Done() + if err := m.backfillProcessor.EnqueueAsync(func() { cache.StoreSeries(blockID, id, v, tenant) - }() + }); errors.Is(err, cacheutil.ErrAsyncBufferFull) { + m.storeDroppedItems[storecache.CacheTypeSeries].Inc() + } } - wg.Wait() } func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { - timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(cacheTypeSeries)) + timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(storecache.CacheTypeSeries)) defer timer.ObserveDuration() misses = ids @@ -188,7 +174,7 @@ func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULI } defer func() { - backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(cacheTypeSeries)) + backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypeSeries)) defer backFillTimer.ObserveDuration() for i, values := range backfillItems { i := i @@ -202,12 +188,12 @@ func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULI m.seriesCaches[i].StoreSeries(blockID, ref, b, tenant) cnt++ if cnt == m.maxBackfillItems { - m.backfillDroppedSeries.Add(float64(len(values) - cnt)) + m.backfillDroppedItems[storecache.CacheTypeSeries].Add(float64(len(values) - cnt)) return } } }); errors.Is(err, cacheutil.ErrAsyncBufferFull) { - m.backfillDroppedSeries.Add(float64(len(values))) + m.backfillDroppedItems[storecache.CacheTypeSeries].Add(float64(len(values))) } } }() @@ -237,10 +223,14 @@ func newMultiLevelCache(reg prometheus.Registerer, cfg MultiLevelIndexCacheConfi Name: "cortex_store_multilevel_index_cache_backfill_dropped_items_total", Help: "Total number of items dropped due to async buffer full when backfilling multilevel cache ", }, []string{"item_type"}) + storeDroppedItems := 
promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_store_multilevel_index_cache_store_dropped_items_total", + Help: "Total number of items dropped due to async buffer full when storing multilevel cache ", + }, []string{"item_type"}) return &multiLevelCache{ - postingsCaches: filterCachesByItem(enabledItems, cacheTypePostings, c...), - seriesCaches: filterCachesByItem(enabledItems, cacheTypeSeries, c...), - expandedPostingCaches: filterCachesByItem(enabledItems, cacheTypeExpandedPostings, c...), + postingsCaches: filterCachesByItem(enabledItems, storecache.CacheTypePostings, c...), + seriesCaches: filterCachesByItem(enabledItems, storecache.CacheTypeSeries, c...), + expandedPostingCaches: filterCachesByItem(enabledItems, storecache.CacheTypeExpandedPostings, c...), backfillProcessor: cacheutil.NewAsyncOperationProcessor(cfg.MaxAsyncBufferSize, cfg.MaxAsyncConcurrency), fetchLatency: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "cortex_store_multilevel_index_cache_fetch_duration_seconds", @@ -252,9 +242,16 @@ func newMultiLevelCache(reg prometheus.Registerer, cfg MultiLevelIndexCacheConfi Help: "Histogram to track latency to backfill items from multi level index cache", Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 25, 30, 40, 50, 60, 90}, }, []string{"item_type"}), - backfillDroppedPostings: backfillDroppedItems.WithLabelValues(cacheTypePostings), - backfillDroppedSeries: backfillDroppedItems.WithLabelValues(cacheTypeSeries), - backfillDroppedExpandedPostings: backfillDroppedItems.WithLabelValues(cacheTypeExpandedPostings), - maxBackfillItems: cfg.MaxBackfillItems, + backfillDroppedItems: map[string]prometheus.Counter{ + storecache.CacheTypePostings: backfillDroppedItems.WithLabelValues(storecache.CacheTypePostings), + storecache.CacheTypeSeries: backfillDroppedItems.WithLabelValues(storecache.CacheTypeSeries), + storecache.CacheTypeExpandedPostings: backfillDroppedItems.WithLabelValues(storecache.CacheTypeExpandedPostings), + }, + storeDroppedItems: map[string]prometheus.Counter{ + storecache.CacheTypePostings: storeDroppedItems.WithLabelValues(storecache.CacheTypePostings), + storecache.CacheTypeSeries: storeDroppedItems.WithLabelValues(storecache.CacheTypeSeries), + storecache.CacheTypeExpandedPostings: storeDroppedItems.WithLabelValues(storecache.CacheTypeExpandedPostings), + }, + maxBackfillItems: cfg.MaxBackfillItems, } } diff --git a/pkg/storage/tsdb/multilevel_cache_test.go b/pkg/storage/tsdb/multilevel_cache_test.go index d66390a1c6..ee31a56346 100644 --- a/pkg/storage/tsdb/multilevel_cache_test.go +++ b/pkg/storage/tsdb/multilevel_cache_test.go @@ -183,7 +183,7 @@ func Test_MultiLevelCache(t *testing.T) { "StorePostings": {{bID, l1, v}}, }, enabledItems: [][]string{ - {cacheTypeSeries}, + {storecache.CacheTypeSeries}, {}, }, call: func(cache storecache.IndexCache) { @@ -207,7 +207,7 @@ func Test_MultiLevelCache(t *testing.T) { "StoreSeries": {{bID, storage.SeriesRef(1), v}}, }, enabledItems: [][]string{ - {cacheTypePostings}, + {storecache.CacheTypePostings}, {}, }, call: func(cache storecache.IndexCache) { @@ -231,7 +231,7 @@ func Test_MultiLevelCache(t *testing.T) { "StoreExpandedPostings": {{bID, []*labels.Matcher{matcher}, v}}, }, enabledItems: [][]string{ - {cacheTypePostings}, + {storecache.CacheTypePostings}, {}, }, call: func(cache storecache.IndexCache) { @@ -295,7 +295,7 @@ func Test_MultiLevelCache(t *testing.T) { "FetchMultiPostings": {map[labels.Label][]byte{l1: v, l2: v, l3: v2}, []labels.Label{}}, }, 
diff --git a/pkg/storage/tsdb/multilevel_cache_test.go b/pkg/storage/tsdb/multilevel_cache_test.go
index d66390a1c6..ee31a56346 100644
--- a/pkg/storage/tsdb/multilevel_cache_test.go
+++ b/pkg/storage/tsdb/multilevel_cache_test.go
@@ -183,7 +183,7 @@ func Test_MultiLevelCache(t *testing.T) {
 				"StorePostings": {{bID, l1, v}},
 			},
 			enabledItems: [][]string{
-				{cacheTypeSeries},
+				{storecache.CacheTypeSeries},
 				{},
 			},
 			call: func(cache storecache.IndexCache) {
@@ -207,7 +207,7 @@ func Test_MultiLevelCache(t *testing.T) {
 				"StoreSeries": {{bID, storage.SeriesRef(1), v}},
 			},
 			enabledItems: [][]string{
-				{cacheTypePostings},
+				{storecache.CacheTypePostings},
 				{},
 			},
 			call: func(cache storecache.IndexCache) {
@@ -231,7 +231,7 @@ func Test_MultiLevelCache(t *testing.T) {
 				"StoreExpandedPostings": {{bID, []*labels.Matcher{matcher}, v}},
 			},
 			enabledItems: [][]string{
-				{cacheTypePostings},
+				{storecache.CacheTypePostings},
 				{},
 			},
 			call: func(cache storecache.IndexCache) {
@@ -295,7 +295,7 @@ func Test_MultiLevelCache(t *testing.T) {
 				"FetchMultiPostings": {map[labels.Label][]byte{l1: v, l2: v, l3: v2}, []labels.Label{}},
 			},
 			enabledItems: [][]string{
-				{cacheTypeSeries},
+				{storecache.CacheTypeSeries},
 				{},
 			},
 			call: func(cache storecache.IndexCache) {
@@ -374,7 +374,7 @@ func Test_MultiLevelCache(t *testing.T) {
 				"FetchMultiSeries": {map[storage.SeriesRef][]byte{1: v, 2: v, 3: v2}, []storage.SeriesRef{}},
 			},
 			enabledItems: [][]string{
-				{cacheTypePostings},
+				{storecache.CacheTypePostings},
 				{},
 			},
 			call: func(cache storecache.IndexCache) {
@@ -430,7 +430,7 @@ func Test_MultiLevelCache(t *testing.T) {
 				"FetchExpandedPostings": {[]byte{}, true},
 			},
 			enabledItems: [][]string{
-				{cacheTypePostings},
+				{storecache.CacheTypePostings},
 				{},
 			},
 			call: func(cache storecache.IndexCache) {
diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go
index 1bd6121134..5d2c2d0ec1 100644
--- a/pkg/storegateway/bucket_stores.go
+++ b/pkg/storegateway/bucket_stores.go
@@ -736,6 +736,10 @@ type limiter struct {
 }
 
 func (c *limiter) Reserve(num uint64) error {
+	return c.ReserveWithType(num, 0)
+}
+
+func (c *limiter) ReserveWithType(num uint64, _ store.StoreDataType) error {
 	err := c.limiter.Reserve(num)
 	if err != nil {
 		return httpgrpc.Errorf(http.StatusUnprocessableEntity, err.Error())
diff --git a/pkg/util/histogram/testutils.go b/pkg/util/histogram/testutils.go
index a2617d8e56..7554df3320 100644
--- a/pkg/util/histogram/testutils.go
+++ b/pkg/util/histogram/testutils.go
@@ -53,3 +53,41 @@ func GenerateTestHistograms(from, step, numHistograms, numSpans, numBuckets int)
 	}
 	return histograms
 }
+
+func GenerateTestHistogram(i int) *histogram.Histogram {
+	bucketsPerSide := 10
+	spanLength := uint32(2)
+	// Given all bucket deltas are 1, sum bucketsPerSide + 1.
+	observationCount := bucketsPerSide * (1 + bucketsPerSide)
+
+	v := 10 + i
+	h := &histogram.Histogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            uint64(v + observationCount),
+		ZeroCount:        uint64(v),
+		ZeroThreshold:    1e-128,
+		Sum:              18.4 * float64(v+1),
+		Schema:           2,
+		NegativeSpans:    make([]histogram.Span, 5),
+		PositiveSpans:    make([]histogram.Span, 5),
+		NegativeBuckets:  make([]int64, bucketsPerSide),
+		PositiveBuckets:  make([]int64, bucketsPerSide),
+	}
+
+	for j := 0; j < 5; j++ {
+		s := histogram.Span{Offset: 1, Length: spanLength}
+		h.NegativeSpans[j] = s
+		h.PositiveSpans[j] = s
+	}
+
+	for j := 0; j < bucketsPerSide; j++ {
+		h.NegativeBuckets[j] = 1
+		h.PositiveBuckets[j] = 1
+	}
+
+	return h
+}
+
+func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram {
+	return GenerateTestHistogram(i).ToFloat(nil)
+}
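`GenerateTestHistogram` above builds a fixed-shape native histogram whose `Count`, `ZeroCount`, and `Sum` are perturbed by `i`, so callers can generate distinguishable but structurally identical histograms. A minimal usage sketch, assuming only the package path shown in this diff:

```go
package main

import (
	"fmt"

	histogram_util "github.com/cortexproject/cortex/pkg/util/histogram"
)

func main() {
	// Different i values shift the counts and sum; spans and buckets stay fixed.
	h := histogram_util.GenerateTestHistogram(1)
	fh := histogram_util.GenerateTestFloatHistogram(1) // same histogram, float form
	fmt.Println(h.Count, h.ZeroCount, fh.Sum)
}
```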
diff --git a/pkg/util/test/test/poll.go b/pkg/util/test/test/poll.go
deleted file mode 100644
index 05ba41235a..0000000000
--- a/pkg/util/test/test/poll.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package test
-
-import (
-	"reflect"
-	"testing"
-	"time"
-)
-
-// Poll repeatedly calls a function until the function returns the correct response or until poll timeout.
-func Poll(t testing.TB, d time.Duration, want interface{}, have func() interface{}) {
-	t.Helper()
-	deadline := time.Now().Add(d)
-	for {
-		if time.Now().After(deadline) {
-			break
-		}
-		if reflect.DeepEqual(want, have()) {
-			return
-		}
-		time.Sleep(d / 100)
-	}
-	h := have()
-	if !reflect.DeepEqual(want, h) {
-		t.Fatalf("expected %v, got %v", want, h)
-	}
-}
diff --git a/pkg/util/test_util.go b/pkg/util/test_util.go
index 7cc95abe2f..e196785bb5 100644
--- a/pkg/util/test_util.go
+++ b/pkg/util/test_util.go
@@ -3,6 +3,16 @@ package util
 import (
 	"math/rand"
 	"strings"
+	"time"
+
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/stretchr/testify/require"
+
+	"github.com/cortexproject/cortex/pkg/chunk"
+	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
+	histogram_util "github.com/cortexproject/cortex/pkg/util/histogram"
 )
 
 func GenerateRandomStrings() []string {
@@ -19,3 +29,40 @@ func GenerateRandomStrings() []string {
 	}
 	return randomStrings
 }
+
+func GenerateChunk(t require.TestingT, step time.Duration, from model.Time, points int, enc promchunk.Encoding) chunk.Chunk {
+	metric := labels.Labels{
+		{Name: model.MetricNameLabel, Value: "foo"},
+	}
+	pe := enc.PromChunkEncoding()
+	pc, err := chunkenc.NewEmptyChunk(pe)
+	require.NoError(t, err)
+	appender, err := pc.Appender()
+	require.NoError(t, err)
+	ts := from
+
+	switch pe {
+	case chunkenc.EncXOR:
+		for i := 0; i < points; i++ {
+			appender.Append(int64(ts), float64(ts))
+			ts = ts.Add(step)
+		}
+	case chunkenc.EncHistogram:
+		histograms := histogram_util.GenerateTestHistograms(int(from), int(step/time.Millisecond), points, 5, 20)
+		for i := 0; i < points; i++ {
+			_, _, appender, err = appender.AppendHistogram(nil, int64(ts), histograms[i], true)
+			require.NoError(t, err)
+			ts = ts.Add(step)
+		}
+	case chunkenc.EncFloatHistogram:
+		histograms := histogram_util.GenerateTestHistograms(int(from), int(step/time.Millisecond), points, 5, 20)
+		for i := 0; i < points; i++ {
+			_, _, appender, err = appender.AppendFloatHistogram(nil, int64(ts), histograms[i].ToFloat(nil), true)
+			require.NoError(t, err)
+			ts = ts.Add(step)
+		}
+	}
+
+	ts = ts.Add(-step) // undo the add that we did just before exiting the loop
+	return chunk.NewChunk(metric, pc, from, ts)
+}
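A usage sketch for the new `GenerateChunk` helper. The encoding constant name (`promchunk.PrometheusXorChunk`) and the `From`/`Through` fields are assumptions based on Cortex's `pkg/chunk` packages and are not shown in this diff:

```go
package util_test

import (
	"testing"
	"time"

	"github.com/prometheus/common/model"

	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
	"github.com/cortexproject/cortex/pkg/util"
)

func TestGenerateChunkSketch(t *testing.T) {
	// 120 XOR-encoded float samples, 15s apart, starting at t=0.
	c := util.GenerateChunk(t, 15*time.Second, model.Time(0), 120, promchunk.PrometheusXorChunk)
	t.Logf("chunk spans %v..%v", c.From, c.Through)
}
```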
diff --git a/pkg/util/tls/test/tls_integration_test.go b/pkg/util/tls/test/tls_integration_test.go
index 20268e3e2d..d37e57f2c6 100644
--- a/pkg/util/tls/test/tls_integration_test.go
+++ b/pkg/util/tls/test/tls_integration_test.go
@@ -150,7 +150,7 @@ func newIntegrationClientServer(
 			assert.NoError(t, err, tc.name)
 
 			dialOptions = append([]grpc.DialOption{grpc.WithDefaultCallOptions(clientConfig.CallOptions()...)}, dialOptions...)
-			conn, err := grpc.Dial(grpcHost, dialOptions...)
+			conn, err := grpc.NewClient(grpcHost, dialOptions...)
 			assert.NoError(t, err, tc.name)
 			require.NoError(t, err, tc.name)
 			require.NoError(t, err, tc.name)
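The test now uses `grpc.NewClient`, which replaced the deprecated `grpc.Dial` in grpc-go. Unlike `Dial`, it performs no I/O at construction time: it only validates the target and options, and the connection is established lazily on the first RPC. A minimal sketch (the target address is illustrative):

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// NewClient never blocks or dials here; an unreachable address only
	// surfaces as an error on the first RPC, not at construction.
	conn, err := grpc.NewClient("localhost:9095",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err) // only invalid targets/options fail here
	}
	defer conn.Close()
}
```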
diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go
index d65ce2905a..a0f53aaabc 100644
--- a/pkg/util/validation/limits.go
+++ b/pkg/util/validation/limits.go
@@ -24,6 +24,7 @@ import (
 var errMaxGlobalSeriesPerUserValidation = errors.New("The ingester.max-global-series-per-user limit is unsupported if distributor.shard-by-all-labels is disabled")
 var errDuplicateQueryPriorities = errors.New("duplicate entry of priorities found. Make sure they are all unique, including the default priority")
 var errCompilingQueryPriorityRegex = errors.New("error compiling query priority regex")
+var errDuplicatePerLabelSetLimit = errors.New("duplicate per labelSet limits found. Make sure they are all unique")
 
 // Supported values for enum limits
 const (
@@ -76,11 +77,15 @@ type TimeWindow struct {
 	End   model.Duration `yaml:"end" json:"end" doc:"nocli|description=End of the data select time window (including range selectors, modifiers and lookback delta) that the query should be within. If set to 0, it won't be checked.|default=0"`
 }
 
-type MaxSeriesPerLabelSet struct {
-	Limit    int           `yaml:"limit" json:"limit" doc:"nocli|description=The maximum number of active series per LabelSet before replication."`
-	LabelSet labels.Labels `yaml:"label_set" json:"label_set" doc:"nocli|description=LabelSet which the limit should be applied."`
-	Id       string        `yaml:"-" json:"-" doc:"nocli"`
-	Hash     uint64        `yaml:"-" json:"-" doc:"nocli"`
+type LimitsPerLabelSetEntry struct {
+	MaxSeries int `yaml:"max_series" json:"max_series" doc:"nocli|description=The maximum number of active series per LabelSet, across the cluster before replication. Setting the value 0 will enable the monitoring (metrics) but would not enforce any limits."`
+}
+
+type LimitsPerLabelSet struct {
+	Limits   LimitsPerLabelSetEntry `yaml:"limits" json:"limits" doc:"nocli"`
+	LabelSet labels.Labels          `yaml:"label_set" json:"label_set" doc:"nocli|description=LabelSet which the limit should be applied."`
+	Id       string                 `yaml:"-" json:"-" doc:"nocli"`
+	Hash     uint64                 `yaml:"-" json:"-" doc:"nocli"`
 }
 
 // Limits describe all the limits for users; can be used to describe global default
@@ -111,11 +116,11 @@ type Limits struct {
 
 	// Ingester enforced limits.
 	// Series
-	MaxLocalSeriesPerUser    int                    `yaml:"max_series_per_user" json:"max_series_per_user"`
-	MaxLocalSeriesPerMetric  int                    `yaml:"max_series_per_metric" json:"max_series_per_metric"`
-	MaxGlobalSeriesPerUser   int                    `yaml:"max_global_series_per_user" json:"max_global_series_per_user"`
-	MaxGlobalSeriesPerMetric int                    `yaml:"max_global_series_per_metric" json:"max_global_series_per_metric"`
-	MaxSeriesPerLabelSet     []MaxSeriesPerLabelSet `yaml:"max_series_per_label_set" json:"max_series_per_label_set" doc:"nocli|description=[Experimental] The maximum number of active series per LabelSet, across the cluster before replication. Empty list to disable."`
+	MaxLocalSeriesPerUser    int                 `yaml:"max_series_per_user" json:"max_series_per_user"`
+	MaxLocalSeriesPerMetric  int                 `yaml:"max_series_per_metric" json:"max_series_per_metric"`
+	MaxGlobalSeriesPerUser   int                 `yaml:"max_global_series_per_user" json:"max_global_series_per_user"`
+	MaxGlobalSeriesPerMetric int                 `yaml:"max_global_series_per_metric" json:"max_global_series_per_metric"`
+	LimitsPerLabelSet        []LimitsPerLabelSet `yaml:"limits_per_label_set" json:"limits_per_label_set" doc:"nocli|description=[Experimental] Enable limits per LabelSet. Supported limits per labelSet: [max_series]"`
 
 	// Metadata
 	MaxLocalMetricsWithMetadataPerUser  int `yaml:"max_metadata_per_user" json:"max_metadata_per_user"`
@@ -295,7 +300,9 @@ func (l *Limits) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return err
 	}
 
-	l.calculateMaxSeriesPerLabelSetId()
+	if err := l.calculateMaxSeriesPerLabelSetId(); err != nil {
+		return err
+	}
 
 	return nil
 }
@@ -323,17 +330,27 @@ func (l *Limits) UnmarshalJSON(data []byte) error {
 		return err
 	}
 
-	l.calculateMaxSeriesPerLabelSetId()
+	if err := l.calculateMaxSeriesPerLabelSetId(); err != nil {
+		return err
+	}
 
 	return nil
 }
 
-func (l *Limits) calculateMaxSeriesPerLabelSetId() {
-	for k, limit := range l.MaxSeriesPerLabelSet {
+func (l *Limits) calculateMaxSeriesPerLabelSetId() error {
+	hMap := map[uint64]struct{}{}
+
+	for k, limit := range l.LimitsPerLabelSet {
 		limit.Id = limit.LabelSet.String()
 		limit.Hash = fnv1a.HashBytes64([]byte(limit.Id))
-		l.MaxSeriesPerLabelSet[k] = limit
+		l.LimitsPerLabelSet[k] = limit
+		if _, ok := hMap[limit.Hash]; ok {
+			return errDuplicatePerLabelSetLimit
+		}
+		hMap[limit.Hash] = struct{}{}
 	}
+
+	return nil
 }
 
 func (l *Limits) copyNotificationIntegrationLimits(defaults NotificationRateLimitMap) {
@@ -541,9 +558,9 @@ func (o *Overrides) MaxGlobalSeriesPerMetric(userID string) int {
 	return o.GetOverridesForUser(userID).MaxGlobalSeriesPerMetric
 }
 
-// MaxSeriesPerLabelSet returns the maximum number of series allowed per labelset across the cluster.
-func (o *Overrides) MaxSeriesPerLabelSet(userID string) []MaxSeriesPerLabelSet {
-	return o.GetOverridesForUser(userID).MaxSeriesPerLabelSet
+// LimitsPerLabelSet returns the user limits per labelset across the cluster.
+func (o *Overrides) LimitsPerLabelSet(userID string) []LimitsPerLabelSet {
+	return o.GetOverridesForUser(userID).LimitsPerLabelSet
 }
 
 // MaxChunksPerQueryFromStore returns the maximum number of chunks allowed per query when fetching
diff --git a/pkg/util/validation/limits_test.go b/pkg/util/validation/limits_test.go
index bc212540d3..a104c2e7b0 100644
--- a/pkg/util/validation/limits_test.go
+++ b/pkg/util/validation/limits_test.go
@@ -174,6 +174,37 @@ func TestLimitsTagsYamlMatchJson(t *testing.T) {
 	assert.Empty(t, mismatch, "expected no mismatched JSON and YAML tags")
 }
 
+func TestOverrides_LimitsPerLabelSet(t *testing.T) {
+	inputYAML := `
+limits_per_label_set:
+  - label_set:
+      labelName1: LabelValue1
+    limits:
+      max_series: 10
+`
+
+	limitsYAML := Limits{}
+	err := yaml.Unmarshal([]byte(inputYAML), &limitsYAML)
+	require.NoError(t, err)
+	require.Len(t, limitsYAML.LimitsPerLabelSet, 1)
+	require.Len(t, limitsYAML.LimitsPerLabelSet[0].LabelSet, 1)
+	require.Equal(t, limitsYAML.LimitsPerLabelSet[0].Limits.MaxSeries, 10)
+
+	duplicatedInputYAML := `
+limits_per_label_set:
+  - label_set:
+      labelName1: LabelValue1
+    limits:
+      max_series: 10
+  - label_set:
+      labelName1: LabelValue1
+    limits:
+      max_series: 10
+`
+	err = yaml.Unmarshal([]byte(duplicatedInputYAML), &limitsYAML)
+	require.Equal(t, err, errDuplicatePerLabelSetLimit)
+}
+
 func TestLimitsStringDurationYamlMatchJson(t *testing.T) {
 	inputYAML := `
 max_query_lookback: 1s
diff --git a/pkg/util/validation/validate.go b/pkg/util/validation/validate.go
index 0985e7db75..f03b8b74b1 100644
--- a/pkg/util/validation/validate.go
+++ b/pkg/util/validation/validate.go
@@ -1,6 +1,7 @@
 package validation
 
 import (
+	"errors"
 	"net/http"
 	"strings"
 	"time"
@@ -68,52 +69,66 @@ const (
 	ExemplarMaxLabelSetLength = 128
 )
 
-// DiscardedSamples is a metric of the number of discarded samples, by reason.
-var DiscardedSamples = prometheus.NewCounterVec(
-	prometheus.CounterOpts{
-		Name: "cortex_discarded_samples_total",
-		Help: "The total number of samples that were discarded.",
-	},
-	[]string{discardReasonLabel, "user"},
-)
+type ValidateMetrics struct {
+	DiscardedSamples   *prometheus.CounterVec
+	DiscardedExemplars *prometheus.CounterVec
+	DiscardedMetadata  *prometheus.CounterVec
+}
 
-// DiscardedExemplars is a metric of the number of discarded exemplars, by reason.
-var DiscardedExemplars = prometheus.NewCounterVec(
-	prometheus.CounterOpts{
-		Name: "cortex_discarded_exemplars_total",
-		Help: "The total number of exemplars that were discarded.",
-	},
-	[]string{discardReasonLabel, "user"},
-)
+func registerCollector(r prometheus.Registerer, c prometheus.Collector) {
+	err := r.Register(c)
+	if err != nil && !errors.As(err, &prometheus.AlreadyRegisteredError{}) {
+		panic(err)
+	}
+}
 
-// DiscardedMetadata is a metric of the number of discarded metadata, by reason.
-var DiscardedMetadata = prometheus.NewCounterVec(
-	prometheus.CounterOpts{
-		Name: "cortex_discarded_metadata_total",
-		Help: "The total number of metadata that were discarded.",
-	},
-	[]string{discardReasonLabel, "user"},
-)
+func NewValidateMetrics(r prometheus.Registerer) *ValidateMetrics {
+	discardedSamples := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "cortex_discarded_samples_total",
+			Help: "The total number of samples that were discarded.",
+		},
+		[]string{discardReasonLabel, "user"},
+	)
+	registerCollector(r, discardedSamples)
+	discardedExemplars := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "cortex_discarded_exemplars_total",
+			Help: "The total number of exemplars that were discarded.",
+		},
+		[]string{discardReasonLabel, "user"},
+	)
+	registerCollector(r, discardedExemplars)
+	discardedMetadata := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "cortex_discarded_metadata_total",
+			Help: "The total number of metadata that were discarded.",
+		},
+		[]string{discardReasonLabel, "user"},
+	)
+	registerCollector(r, discardedMetadata)
+	m := &ValidateMetrics{
+		DiscardedSamples:   discardedSamples,
+		DiscardedExemplars: discardedExemplars,
+		DiscardedMetadata:  discardedMetadata,
+	}
 
-func init() {
-	prometheus.MustRegister(DiscardedSamples)
-	prometheus.MustRegister(DiscardedExemplars)
-	prometheus.MustRegister(DiscardedMetadata)
+	return m
 }
 
-// ValidateSample returns an err if the sample is invalid.
+// ValidateSampleTimestamp returns an err if the sample timestamp is invalid.
 // The returned error may retain the provided series labels.
-func ValidateSample(limits *Limits, userID string, ls []cortexpb.LabelAdapter, s cortexpb.Sample) ValidationError {
+func ValidateSampleTimestamp(validateMetrics *ValidateMetrics, limits *Limits, userID string, ls []cortexpb.LabelAdapter, timestampMs int64) ValidationError {
 	unsafeMetricName, _ := extract.UnsafeMetricNameFromLabelAdapters(ls)
 
-	if limits.RejectOldSamples && model.Time(s.TimestampMs) < model.Now().Add(-time.Duration(limits.RejectOldSamplesMaxAge)) {
-		DiscardedSamples.WithLabelValues(greaterThanMaxSampleAge, userID).Inc()
-		return newSampleTimestampTooOldError(unsafeMetricName, s.TimestampMs)
+	if limits.RejectOldSamples && model.Time(timestampMs) < model.Now().Add(-time.Duration(limits.RejectOldSamplesMaxAge)) {
+		validateMetrics.DiscardedSamples.WithLabelValues(greaterThanMaxSampleAge, userID).Inc()
+		return newSampleTimestampTooOldError(unsafeMetricName, timestampMs)
 	}
 
-	if model.Time(s.TimestampMs) > model.Now().Add(time.Duration(limits.CreationGracePeriod)) {
-		DiscardedSamples.WithLabelValues(tooFarInFuture, userID).Inc()
-		return newSampleTimestampTooNewError(unsafeMetricName, s.TimestampMs)
+	if model.Time(timestampMs) > model.Now().Add(time.Duration(limits.CreationGracePeriod)) {
+		validateMetrics.DiscardedSamples.WithLabelValues(tooFarInFuture, userID).Inc()
+		return newSampleTimestampTooNewError(unsafeMetricName, timestampMs)
 	}
 
 	return nil
@@ -121,14 +136,14 @@ func ValidateSample(limits *Limits, userID string, ls []cortexpb.LabelAdapter, s
 
 // ValidateExemplar returns an error if the exemplar is invalid.
 // The returned error may retain the provided series labels.
-func ValidateExemplar(userID string, ls []cortexpb.LabelAdapter, e cortexpb.Exemplar) ValidationError {
+func ValidateExemplar(validateMetrics *ValidateMetrics, userID string, ls []cortexpb.LabelAdapter, e cortexpb.Exemplar) ValidationError {
 	if len(e.Labels) <= 0 {
-		DiscardedExemplars.WithLabelValues(exemplarLabelsMissing, userID).Inc()
+		validateMetrics.DiscardedExemplars.WithLabelValues(exemplarLabelsMissing, userID).Inc()
 		return newExemplarEmtpyLabelsError(ls, []cortexpb.LabelAdapter{}, e.TimestampMs)
 	}
 
 	if e.TimestampMs == 0 {
-		DiscardedExemplars.WithLabelValues(exemplarTimestampInvalid, userID).Inc()
+		validateMetrics.DiscardedExemplars.WithLabelValues(exemplarTimestampInvalid, userID).Inc()
 		return newExemplarMissingTimestampError(
 			ls,
 			e.Labels,
@@ -145,7 +160,7 @@ func ValidateExemplar(userID string, ls []cortexpb.LabelAdapter, e cortexpb.Exem
 	}
 
 	if labelSetLen > ExemplarMaxLabelSetLength {
-		DiscardedExemplars.WithLabelValues(exemplarLabelsTooLong, userID).Inc()
+		validateMetrics.DiscardedExemplars.WithLabelValues(exemplarLabelsTooLong, userID).Inc()
 		return newExemplarLabelLengthError(
 			ls,
 			e.Labels,
@@ -158,23 +173,23 @@ func ValidateExemplar(userID string, ls []cortexpb.LabelAdapter, e cortexpb.Exem
 
 // ValidateLabels returns an err if the labels are invalid.
 // The returned error may retain the provided series labels.
-func ValidateLabels(limits *Limits, userID string, ls []cortexpb.LabelAdapter, skipLabelNameValidation bool) ValidationError {
+func ValidateLabels(validateMetrics *ValidateMetrics, limits *Limits, userID string, ls []cortexpb.LabelAdapter, skipLabelNameValidation bool) ValidationError {
 	if limits.EnforceMetricName {
 		unsafeMetricName, err := extract.UnsafeMetricNameFromLabelAdapters(ls)
 		if err != nil {
-			DiscardedSamples.WithLabelValues(missingMetricName, userID).Inc()
+			validateMetrics.DiscardedSamples.WithLabelValues(missingMetricName, userID).Inc()
 			return newNoMetricNameError()
 		}
 
 		if !model.IsValidMetricName(model.LabelValue(unsafeMetricName)) {
-			DiscardedSamples.WithLabelValues(invalidMetricName, userID).Inc()
+			validateMetrics.DiscardedSamples.WithLabelValues(invalidMetricName, userID).Inc()
 			return newInvalidMetricNameError(unsafeMetricName)
 		}
 	}
 
 	numLabelNames := len(ls)
 	if numLabelNames > limits.MaxLabelNamesPerSeries {
-		DiscardedSamples.WithLabelValues(maxLabelNamesPerSeries, userID).Inc()
+		validateMetrics.DiscardedSamples.WithLabelValues(maxLabelNamesPerSeries, userID).Inc()
 		return newTooManyLabelsError(ls, limits.MaxLabelNamesPerSeries)
 	}
 
@@ -186,21 +201,21 @@ func ValidateLabels(limits *Limits, userID string, ls []cortexpb.LabelAdapter, s
 
 	for _, l := range ls {
 		if !skipLabelNameValidation && !model.LabelName(l.Name).IsValid() {
-			DiscardedSamples.WithLabelValues(invalidLabel, userID).Inc()
+			validateMetrics.DiscardedSamples.WithLabelValues(invalidLabel, userID).Inc()
 			return newInvalidLabelError(ls, l.Name)
 		} else if len(l.Name) > maxLabelNameLength {
-			DiscardedSamples.WithLabelValues(labelNameTooLong, userID).Inc()
+			validateMetrics.DiscardedSamples.WithLabelValues(labelNameTooLong, userID).Inc()
 			return newLabelNameTooLongError(ls, l.Name, maxLabelNameLength)
 		} else if len(l.Value) > maxLabelValueLength {
-			DiscardedSamples.WithLabelValues(labelValueTooLong, userID).Inc()
+			validateMetrics.DiscardedSamples.WithLabelValues(labelValueTooLong, userID).Inc()
 			return newLabelValueTooLongError(ls, l.Name, l.Value, maxLabelValueLength)
 		} else if cmp := strings.Compare(lastLabelName, l.Name); cmp >= 0 {
 			if cmp == 0 {
-				DiscardedSamples.WithLabelValues(duplicateLabelNames, userID).Inc()
+				validateMetrics.DiscardedSamples.WithLabelValues(duplicateLabelNames, userID).Inc()
 				return newDuplicatedLabelError(ls, l.Name)
 			}
 
-			DiscardedSamples.WithLabelValues(labelsNotSorted, userID).Inc()
+			validateMetrics.DiscardedSamples.WithLabelValues(labelsNotSorted, userID).Inc()
 			return newLabelsNotSortedError(ls, l.Name)
 		}
 
@@ -208,16 +223,16 @@ func ValidateLabels(limits *Limits, userID string, ls []cortexpb.LabelAdapter, s
 		labelsSizeBytes += l.Size()
 	}
 	if maxLabelsSizeBytes > 0 && labelsSizeBytes > maxLabelsSizeBytes {
-		DiscardedSamples.WithLabelValues(labelsSizeBytesExceeded, userID).Inc()
+		validateMetrics.DiscardedSamples.WithLabelValues(labelsSizeBytesExceeded, userID).Inc()
 		return labelSizeBytesExceededError(ls, labelsSizeBytes, maxLabelsSizeBytes)
 	}
 	return nil
 }
 
 // ValidateMetadata returns an err if a metric metadata is invalid.
-func ValidateMetadata(cfg *Limits, userID string, metadata *cortexpb.MetricMetadata) error {
+func ValidateMetadata(validateMetrics *ValidateMetrics, cfg *Limits, userID string, metadata *cortexpb.MetricMetadata) error {
 	if cfg.EnforceMetadataMetricName && metadata.GetMetricFamilyName() == "" {
-		DiscardedMetadata.WithLabelValues(missingMetricName, userID).Inc()
+		validateMetrics.DiscardedMetadata.WithLabelValues(missingMetricName, userID).Inc()
 		return httpgrpc.Errorf(http.StatusBadRequest, errMetadataMissingMetricName)
 	}
 
@@ -240,23 +255,23 @@ func ValidateMetadata(cfg *Limits, userID string, metadata *cortexpb.MetricMetad
 	}
 
 	if reason != "" {
-		DiscardedMetadata.WithLabelValues(reason, userID).Inc()
+		validateMetrics.DiscardedMetadata.WithLabelValues(reason, userID).Inc()
 		return httpgrpc.Errorf(http.StatusBadRequest, errMetadataTooLong, metadataType, cause, metadata.GetMetricFamilyName())
 	}
 
 	return nil
 }
 
-func DeletePerUserValidationMetrics(userID string, log log.Logger) {
+func DeletePerUserValidationMetrics(validateMetrics *ValidateMetrics, userID string, log log.Logger) {
 	filter := map[string]string{"user": userID}
 
-	if err := util.DeleteMatchingLabels(DiscardedSamples, filter); err != nil {
+	if err := util.DeleteMatchingLabels(validateMetrics.DiscardedSamples, filter); err != nil {
 		level.Warn(log).Log("msg", "failed to remove cortex_discarded_samples_total metric for user", "user", userID, "err", err)
 	}
-	if err := util.DeleteMatchingLabels(DiscardedExemplars, filter); err != nil {
+	if err := util.DeleteMatchingLabels(validateMetrics.DiscardedExemplars, filter); err != nil {
 		level.Warn(log).Log("msg", "failed to remove cortex_discarded_exemplars_total metric for user", "user", userID, "err", err)
 	}
-	if err := util.DeleteMatchingLabels(DiscardedMetadata, filter); err != nil {
+	if err := util.DeleteMatchingLabels(validateMetrics.DiscardedMetadata, filter); err != nil {
 		level.Warn(log).Log("msg", "failed to remove cortex_discarded_metadata_total metric for user", "user", userID, "err", err)
 	}
 }
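The `registerCollector` helper above makes registration idempotent via `prometheus.AlreadyRegisteredError`, so `NewValidateMetrics` can be constructed more than once against the same registry (as the updated tests do) without the panic that `prometheus.MustRegister` would raise. A self-contained sketch of that pattern:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// registerCollector mirrors the helper added in validate.go: any error other
// than AlreadyRegisteredError is still treated as fatal.
func registerCollector(r prometheus.Registerer, c prometheus.Collector) {
	if err := r.Register(c); err != nil && !errors.As(err, &prometheus.AlreadyRegisteredError{}) {
		panic(err)
	}
}

func main() {
	reg := prometheus.NewRegistry()
	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "demo"})
	registerCollector(reg, c)
	registerCollector(reg, c) // second registration is a no-op, not a panic
	fmt.Println("registered twice without panic")
}
```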
diff --git a/pkg/util/validation/validate_test.go b/pkg/util/validation/validate_test.go
index 71dddab5ba..2ecec1ee72 100644
--- a/pkg/util/validation/validate_test.go
+++ b/pkg/util/validation/validate_test.go
@@ -20,6 +20,9 @@ func TestValidateLabels(t *testing.T) {
 	cfg := new(Limits)
 	userID := "testUser"
 
+	reg := prometheus.NewRegistry()
+	validateMetrics := NewValidateMetrics(reg)
+
 	cfg.MaxLabelValueLength = 25
 	cfg.MaxLabelNameLength = 25
 	cfg.MaxLabelNamesPerSeries = 2
@@ -93,13 +96,13 @@ func TestValidateLabels(t *testing.T) {
 			nil,
 		},
 	} {
-		err := ValidateLabels(cfg, userID, cortexpb.FromMetricsToLabelAdapters(c.metric), c.skipLabelNameValidation)
+		err := ValidateLabels(validateMetrics, cfg, userID, cortexpb.FromMetricsToLabelAdapters(c.metric), c.skipLabelNameValidation)
 		assert.Equal(t, c.err, err, "wrong error")
 	}
 
-	DiscardedSamples.WithLabelValues("random reason", "different user").Inc()
+	validateMetrics.DiscardedSamples.WithLabelValues("random reason", "different user").Inc()
 
-	require.NoError(t, testutil.GatherAndCompare(prometheus.DefaultGatherer, strings.NewReader(`
+	require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
 		# HELP cortex_discarded_samples_total The total number of samples that were discarded.
 		# TYPE cortex_discarded_samples_total counter
 		cortex_discarded_samples_total{reason="label_invalid",user="testUser"} 1
@@ -113,9 +116,9 @@ func TestValidateLabels(t *testing.T) {
 		cortex_discarded_samples_total{reason="random reason",user="different user"} 1
 	`), "cortex_discarded_samples_total"))
 
-	DeletePerUserValidationMetrics(userID, util_log.Logger)
+	DeletePerUserValidationMetrics(validateMetrics, userID, util_log.Logger)
 
-	require.NoError(t, testutil.GatherAndCompare(prometheus.DefaultGatherer, strings.NewReader(`
+	require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
 		# HELP cortex_discarded_samples_total The total number of samples that were discarded.
 		# TYPE cortex_discarded_samples_total counter
 		cortex_discarded_samples_total{reason="random reason",user="different user"} 1
@@ -124,7 +127,8 @@ func TestValidateLabels(t *testing.T) {
 
 func TestValidateExemplars(t *testing.T) {
 	userID := "testUser"
-
+	reg := prometheus.NewRegistry()
+	validateMetrics := NewValidateMetrics(reg)
 	invalidExemplars := []cortexpb.Exemplar{
 		{
 			// Missing labels
@@ -142,13 +146,13 @@ func TestValidateExemplars(t *testing.T) {
 	}
 
 	for _, ie := range invalidExemplars {
-		err := ValidateExemplar(userID, []cortexpb.LabelAdapter{}, ie)
+		err := ValidateExemplar(validateMetrics, userID, []cortexpb.LabelAdapter{}, ie)
 		assert.NotNil(t, err)
 	}
 
-	DiscardedExemplars.WithLabelValues("random reason", "different user").Inc()
+	validateMetrics.DiscardedExemplars.WithLabelValues("random reason", "different user").Inc()
 
-	require.NoError(t, testutil.GatherAndCompare(prometheus.DefaultGatherer, strings.NewReader(`
+	require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
 		# HELP cortex_discarded_exemplars_total The total number of exemplars that were discarded.
 		# TYPE cortex_discarded_exemplars_total counter
 		cortex_discarded_exemplars_total{reason="exemplar_labels_missing",user="testUser"} 1
@@ -159,8 +163,8 @@ func TestValidateExemplars(t *testing.T) {
 	`), "cortex_discarded_exemplars_total"))
 
 	// Delete test user and verify only different remaining
-	DeletePerUserValidationMetrics(userID, util_log.Logger)
-	require.NoError(t, testutil.GatherAndCompare(prometheus.DefaultGatherer, strings.NewReader(`
+	DeletePerUserValidationMetrics(validateMetrics, userID, util_log.Logger)
+	require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
 		# HELP cortex_discarded_exemplars_total The total number of exemplars that were discarded.
 		# TYPE cortex_discarded_exemplars_total counter
 		cortex_discarded_exemplars_total{reason="random reason",user="different user"} 1
@@ -171,7 +175,8 @@ func TestValidateMetadata(t *testing.T) {
 	cfg := new(Limits)
 	cfg.EnforceMetadataMetricName = true
 	cfg.MaxMetadataLength = 22
-
+	reg := prometheus.NewRegistry()
+	validateMetrics := NewValidateMetrics(reg)
 	userID := "testUser"
 
 	for _, c := range []struct {
@@ -206,14 +211,14 @@ func TestValidateMetadata(t *testing.T) {
 		},
 	} {
 		t.Run(c.desc, func(t *testing.T) {
-			err := ValidateMetadata(cfg, userID, c.metadata)
+			err := ValidateMetadata(validateMetrics, cfg, userID, c.metadata)
 			assert.Equal(t, c.err, err, "wrong error")
 		})
 	}
 
-	DiscardedMetadata.WithLabelValues("random reason", "different user").Inc()
+	validateMetrics.DiscardedMetadata.WithLabelValues("random reason", "different user").Inc()
 
-	require.NoError(t, testutil.GatherAndCompare(prometheus.DefaultGatherer, strings.NewReader(`
+	require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
 		# HELP cortex_discarded_metadata_total The total number of metadata that were discarded.
 		# TYPE cortex_discarded_metadata_total counter
 		cortex_discarded_metadata_total{reason="help_too_long",user="testUser"} 1
@@ -224,9 +229,9 @@ func TestValidateMetadata(t *testing.T) {
 		cortex_discarded_metadata_total{reason="random reason",user="different user"} 1
 	`), "cortex_discarded_metadata_total"))
 
-	DeletePerUserValidationMetrics(userID, util_log.Logger)
+	DeletePerUserValidationMetrics(validateMetrics, userID, util_log.Logger)
 
-	require.NoError(t, testutil.GatherAndCompare(prometheus.DefaultGatherer, strings.NewReader(`
+	require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
 		# HELP cortex_discarded_metadata_total The total number of metadata that were discarded.
 		# TYPE cortex_discarded_metadata_total counter
 		cortex_discarded_metadata_total{reason="random reason",user="different user"} 1
@@ -238,10 +243,11 @@ func TestValidateLabelOrder(t *testing.T) {
 	cfg.MaxLabelNameLength = 10
 	cfg.MaxLabelNamesPerSeries = 10
 	cfg.MaxLabelValueLength = 10
-
+	reg := prometheus.NewRegistry()
+	validateMetrics := NewValidateMetrics(reg)
 	userID := "testUser"
 
-	actual := ValidateLabels(cfg, userID, []cortexpb.LabelAdapter{
+	actual := ValidateLabels(validateMetrics, cfg, userID, []cortexpb.LabelAdapter{
 		{Name: model.MetricNameLabel, Value: "m"},
 		{Name: "b", Value: "b"},
 		{Name: "a", Value: "a"},
@@ -259,10 +265,11 @@ func TestValidateLabelDuplication(t *testing.T) {
 	cfg.MaxLabelNameLength = 10
 	cfg.MaxLabelNamesPerSeries = 10
 	cfg.MaxLabelValueLength = 10
-
+	reg := prometheus.NewRegistry()
+	validateMetrics := NewValidateMetrics(reg)
 	userID := "testUser"
 
-	actual := ValidateLabels(cfg, userID, []cortexpb.LabelAdapter{
+	actual := ValidateLabels(validateMetrics, cfg, userID, []cortexpb.LabelAdapter{
 		{Name: model.MetricNameLabel, Value: "a"},
 		{Name: model.MetricNameLabel, Value: "b"},
 	}, false)
@@ -272,7 +279,7 @@ func TestValidateLabelDuplication(t *testing.T) {
 	}, model.MetricNameLabel)
 	assert.Equal(t, expected, actual)
 
-	actual = ValidateLabels(cfg, userID, []cortexpb.LabelAdapter{
+	actual = ValidateLabels(validateMetrics, cfg, userID, []cortexpb.LabelAdapter{
 		{Name: model.MetricNameLabel, Value: "a"},
 		{Name: "a", Value: "a"},
 		{Name: "a", Value: "a"},
diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md
index ef00e902c6..7ef5fc0def 100644
--- a/vendor/cloud.google.com/go/auth/CHANGES.md
+++ b/vendor/cloud.google.com/go/auth/CHANGES.md
@@ -1,5 +1,48 @@
 # Changelog
 
+## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31)
+
+
+### Bug Fixes
+
+* **auth:** Pass through client to 2LO and 3LO flows ([#10290](https://github.com/googleapis/google-cloud-go/issues/10290)) ([685784e](https://github.com/googleapis/google-cloud-go/commit/685784ea84358c15e9214bdecb307d37aa3b6d2f))
+
+## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.2...auth/v0.5.0) (2024-05-28)
+
+
+### Features
+
+* **auth:** Adds X509 workload certificate provider ([#10233](https://github.com/googleapis/google-cloud-go/issues/10233)) ([17a9db7](https://github.com/googleapis/google-cloud-go/commit/17a9db73af35e3d1a7a25ac4fd1377a103de6150))
+
+## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16)
+
+
+### Bug Fixes
+
+* **auth:** Enable client certificates by default only for GDU ([#10151](https://github.com/googleapis/google-cloud-go/issues/10151)) ([7c52978](https://github.com/googleapis/google-cloud-go/commit/7c529786275a39b7e00525f7d5e7be0d963e9e15))
+* **auth:** Handle non-Transport DefaultTransport ([#10162](https://github.com/googleapis/google-cloud-go/issues/10162)) ([fa3bfdb](https://github.com/googleapis/google-cloud-go/commit/fa3bfdb23aaa45b34394a8b61e753b3587506782)), refs [#10159](https://github.com/googleapis/google-cloud-go/issues/10159)
+* **auth:** Have refresh time match docs ([#10147](https://github.com/googleapis/google-cloud-go/issues/10147)) ([bcb5568](https://github.com/googleapis/google-cloud-go/commit/bcb5568c07a54dd3d2e869d15f502b0741a609e8))
+* **auth:** Update compute token fetching error with named prefix ([#10180](https://github.com/googleapis/google-cloud-go/issues/10180)) ([4573504](https://github.com/googleapis/google-cloud-go/commit/4573504828d2928bebedc875d87650ba227829ea))
+
+## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09)
+
+
+### Bug Fixes
+
+* **auth:** Don't try to detect default creds it opt configured ([#10143](https://github.com/googleapis/google-cloud-go/issues/10143)) ([804632e](https://github.com/googleapis/google-cloud-go/commit/804632e7c5b0b85ff522f7951114485e256eb5bc))
+
+## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.3.0...auth/v0.4.0) (2024-05-07)
+
+
+### Features
+
+* **auth:** Enable client certificates by default ([#10102](https://github.com/googleapis/google-cloud-go/issues/10102)) ([9013e52](https://github.com/googleapis/google-cloud-go/commit/9013e5200a6ec0f178ed91acb255481ffb073a2c))
+
+
+### Bug Fixes
+
+* **auth:** Get s2a logic up to date ([#10093](https://github.com/googleapis/google-cloud-go/issues/10093)) ([4fe9ae4](https://github.com/googleapis/google-cloud-go/commit/4fe9ae4b7101af2a5221d6d6b2e77b479305bb06))
+
 ## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.2...auth/v0.3.0) (2024-04-23)
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
index ea7c1b0ad8..d579e482e8 100644
--- a/vendor/cloud.google.com/go/auth/auth.go
+++ b/vendor/cloud.google.com/go/auth/auth.go
@@ -39,7 +39,7 @@ const (
 	// 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes,
 	// so we give it 15 seconds to refresh it's cache before attempting to refresh a token.
-	defaultExpiryDelta = 215 * time.Second
+	defaultExpiryDelta = 225 * time.Second
 
 	universeDomainDefault = "googleapis.com"
 )
diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go
index 6db643837e..f3ec888242 100644
--- a/vendor/cloud.google.com/go/auth/credentials/compute.go
+++ b/vendor/cloud.google.com/go/auth/credentials/compute.go
@@ -64,9 +64,9 @@ func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) {
 		v.Set("scopes", strings.Join(cs.scopes, ","))
 		tokenURI.RawQuery = v.Encode()
 	}
-	tokenJSON, err := metadata.Get(tokenURI.String())
+	tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String())
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
 	}
 	var res metadataTokenResp
 	if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil {
diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
index a66e56d70f..fe93557389 100644
--- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go
+++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
@@ -137,6 +137,7 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions)
 		Scopes:   opts.scopes(),
 		TokenURL: f.TokenURL,
 		Subject:  opts.Subject,
+		Client:   opts.client(),
 	}
 	if opts2LO.TokenURL == "" {
 		opts2LO.TokenURL = jwtTokenURL
@@ -154,6 +155,7 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions)
 		AuthStyle:        auth.StyleInParams,
 		EarlyTokenExpiry: opts.EarlyTokenRefresh,
 		RefreshToken:     f.RefreshToken,
+		Client:           opts.client(),
 	}
 	return auth.New3LOTokenProvider(opts3LO)
 }
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
index fc32a5f2bb..75bda4c638 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
@@ -47,7 +47,7 @@ var (
 
 // Options used to configure a [GRPCClientConnPool] from [Dial].
 type Options struct {
-	// DisableTelemetry disables default telemetry (OpenCensus). An example
+	// DisableTelemetry disables default telemetry (OpenTelemetry). An example
 	// reason to do so would be to bind custom telemetry that overrides the
 	// defaults.
 	DisableTelemetry bool
@@ -196,6 +196,8 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
 	if io := opts.InternalOptions; io != nil {
 		tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
 		tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint
+		tOpts.EnableDirectPath = io.EnableDirectPath
+		tOpts.EnableDirectPathXds = io.EnableDirectPathXds
 	}
 	transportCreds, endpoint, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts)
 	if err != nil {
@@ -214,12 +216,16 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
 	// Authentication can only be sent when communicating over a secure connection.
 	if !opts.DisableAuthentication {
 		metadata := opts.Metadata
-		creds, err := credentials.DetectDefault(opts.resolveDetectOptions())
-		if err != nil {
-			return nil, err
-		}
+
+		var creds *auth.Credentials
 		if opts.Credentials != nil {
 			creds = opts.Credentials
+		} else {
+			var err error
+			creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
+			if err != nil {
+				return nil, err
+			}
 		}
 
 		qp, err := creds.QuotaProjectID(ctx)
diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
index 06acc04151..ef09c1b752 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
@@ -33,7 +33,7 @@ type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, e
 
 // Options used to configure a [net/http.Client] from [NewClient].
 type Options struct {
-	// DisableTelemetry disables default telemetry (OpenCensus). An example
+	// DisableTelemetry disables default telemetry (OpenTelemetry). An example
 	// reason to do so would be to bind custom telemetry that overrides the
 	// defaults.
 	DisableTelemetry bool
@@ -152,7 +152,14 @@ func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) er
 	}
 	base := client.Transport
 	if base == nil {
-		base = http.DefaultTransport.(*http.Transport).Clone()
+		if dt, ok := http.DefaultTransport.(*http.Transport); ok {
+			base = dt.Clone()
+		} else {
+			// Directly reuse the DefaultTransport if the application has
+			// replaced it with an implementation of RoundTripper other than
+			// http.Transport.
+			base = http.DefaultTransport
+		}
 	}
 	client.Transport = &authTransport{
 		creds: creds,
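The vendored fix above guards the common but unsafe assumption that `http.DefaultTransport` is always an `*http.Transport`: if another package has swapped in a custom `RoundTripper`, the old unconditional type assertion would panic. The same guard in isolation:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	var base http.RoundTripper
	if dt, ok := http.DefaultTransport.(*http.Transport); ok {
		base = dt.Clone() // safe: mutate our own copy, not the shared default
	} else {
		base = http.DefaultTransport // fall back to using it as-is
	}
	fmt.Printf("base transport: %T\n", base)
}
```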
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
index aef4663e6c..94caeb00f0 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/transport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go
@@ -58,9 +58,15 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
 			Key: opts.APIKey,
 		}
 	default:
-		creds, err := credentials.DetectDefault(opts.resolveDetectOptions())
-		if err != nil {
-			return nil, err
+		var creds *auth.Credentials
+		if opts.Credentials != nil {
+			creds = opts.Credentials
+		} else {
+			var err error
+			creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
+			if err != nil {
+				return nil, err
+			}
 		}
 		qp, err := creds.QuotaProjectID(context.Background())
 		if err != nil {
@@ -72,10 +78,6 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
 			}
 			headers.Set(quotaProjectHeaderKey, qp)
 		}
-
-		if opts.Credentials != nil {
-			creds = opts.Credentials
-		}
 		creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
 		trans = &authTransport{
 			base:  trans,
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
index 7ee02c6f61..6ef88311a2 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
@@ -61,6 +61,8 @@ type Options struct {
 	ClientCertProvider cert.Provider
 	Client             *http.Client
 	UniverseDomain     string
+	EnableDirectPath   bool
+	EnableDirectPathXds bool
 }
 
 // getUniverseDomain returns the default service domain for a given Cloud
@@ -195,10 +197,7 @@ func getTransportConfig(opts *Options) (*transportConfig, error) {
 	}
 
 	s2aMTLSEndpoint := opts.DefaultMTLSEndpoint
-	// If there is endpoint override, honor it.
-	if opts.Endpoint != "" {
-		s2aMTLSEndpoint = endpoint
-	}
+
 	s2aAddress := GetS2AAddress()
 	if s2aAddress == "" {
 		return &defaultTransportConfig, nil
@@ -217,12 +216,8 @@ func getTransportConfig(opts *Options) (*transportConfig, error) {
 // A nil default source can be returned if the source does not exist. Any exceptions
 // encountered while initializing the default source will be reported as client
 // error (ex. corrupt metadata file).
-//
-// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE
-// must be set to "true" to allow certificate to be used (including user provided
-// certificates). For details, see AIP-4114.
 func getClientCertificateSource(opts *Options) (cert.Provider, error) {
-	if !isClientCertificateEnabled() {
+	if !isClientCertificateEnabled(opts) {
 		return nil, nil
 	} else if opts.ClientCertProvider != nil {
 		return opts.ClientCertProvider, nil
@@ -231,11 +226,14 @@ func getClientCertificateSource(opts *Options) (cert.Provider, error) {
 	}
 }
 
-func isClientCertificateEnabled() bool {
-	// TODO(andyrzhao): Update default to return "true" after DCA feature is fully released.
-	// error as false is a good default
-	b, _ := strconv.ParseBool(os.Getenv(googleAPIUseCertSource))
-	return b
+// isClientCertificateEnabled returns true by default for all GDU universe domain, unless explicitly overridden by env var
+func isClientCertificateEnabled(opts *Options) bool {
+	if value, ok := os.LookupEnv(googleAPIUseCertSource); ok {
+		// error as false is OK
+		b, _ := strconv.ParseBool(value)
+		return b
+	}
+	return opts.isUniverseDomainGDU()
 }
 
 type transportConfig struct {
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
new file mode 100644
index 0000000000..ea1e1febbc
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
@@ -0,0 +1,117 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"io"
+	"os"
+
+	"github.com/googleapis/enterprise-certificate-proxy/client/util"
+)
+
+type certConfigs struct {
+	Workload *workloadSource `json:"workload"`
+}
+
+type workloadSource struct {
+	CertPath string `json:"cert_path"`
+	KeyPath  string `json:"key_path"`
+}
+
+type certificateConfig struct {
+	CertConfigs certConfigs `json:"cert_configs"`
+}
+
+// NewWorkloadX509CertProvider creates a certificate source
+// that reads a certificate and private key file from the local file system.
+// This is intended to be used for workload identity federation.
+//
+// The configFilePath points to a config file containing relevant parameters
+// such as the certificate and key file paths.
+// If configFilePath is empty, the client will attempt to load the config from
+// a well-known gcloud location.
+func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) {
+	if configFilePath == "" {
+		envFilePath := util.GetConfigFilePathFromEnv()
+		if envFilePath != "" {
+			configFilePath = envFilePath
+		} else {
+			configFilePath = util.GetDefaultConfigFilePath()
+		}
+	}
+
+	certFile, keyFile, err := getCertAndKeyFiles(configFilePath)
+	if err != nil {
+		return nil, err
+	}
+
+	source := &workloadSource{
+		CertPath: certFile,
+		KeyPath:  keyFile,
+	}
+	return source.getClientCertificate, nil
+}
+
+// getClientCertificate attempts to load the certificate and key from the files specified in the
+// certificate config.
+func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+	cert, err := tls.LoadX509KeyPair(s.CertPath, s.KeyPath)
+	if err != nil {
+		return nil, err
+	}
+	return &cert, nil
+}
+
+// getCertAndKeyFiles attempts to read the provided config file and return the certificate and private
+// key file paths.
+func getCertAndKeyFiles(configFilePath string) (string, string, error) {
+	jsonFile, err := os.Open(configFilePath)
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			return "", "", errSourceUnavailable
+		}
+		return "", "", err
+	}
+
+	byteValue, err := io.ReadAll(jsonFile)
+	if err != nil {
+		return "", "", err
+	}
+
+	var config certificateConfig
+	if err := json.Unmarshal(byteValue, &config); err != nil {
+		return "", "", err
+	}
+
+	if config.CertConfigs.Workload == nil {
+		return "", "", errors.New("no Workload Identity Federation certificate information found in the certificate configuration file")
+	}
+
+	certFile := config.CertConfigs.Workload.CertPath
+	keyFile := config.CertConfigs.Workload.KeyPath
+
+	if certFile == "" {
+		return "", "", errors.New("certificate configuration is missing the certificate file location")
+	}
+
+	if keyFile == "" {
+		return "", "", errors.New("certificate configuration is missing the key file location")
+	}
+
+	return certFile, keyFile, nil
+}
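`NewWorkloadX509CertProvider` returns a `cert.Provider`, i.e. a `func(*tls.CertificateRequestInfo) (*tls.Certificate, error)` that slots into `tls.Config.GetClientCertificate`. Since the vendored package is `internal` and not importable, the sketch below mirrors the same mechanism with a local provider; the key-pair paths are placeholders:

```go
package main

import (
	"crypto/tls"
	"log"
)

// provider has the same shape as the vendored cert.Provider: the TLS
// handshake invokes it to load the client certificate lazily.
func provider(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) {
	c, err := tls.LoadX509KeyPair("/path/to/cert.pem", "/path/to/key.pem")
	if err != nil {
		return nil, err
	}
	return &c, nil
}

func main() {
	cfg := &tls.Config{GetClientCertificate: provider}
	_ = cfg
	log.Println("client certificate is loaded per handshake, not at config time")
}
```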
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
index e9e4793523..2ed532deb7 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
@@ -34,12 +34,6 @@ var (
 	// The period an MTLS config can be reused before needing refresh.
 	configExpiry = time.Hour
 
-	// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection.
-	mtlsEndpointEnabledForS2A = func() bool {
-		// TODO(xmenxk): determine this via discovery config.
-		return true
-	}
-
 	// mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source.
 	mtlsOnce sync.Once
 )
@@ -165,19 +159,16 @@ func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool {
 	if !isGoogleS2AEnabled() {
 		return false
 	}
-	// If DefaultMTLSEndpoint is not set and no endpoint override, skip S2A.
-	if opts.DefaultMTLSEndpoint == "" && opts.Endpoint == "" {
-		return false
-	}
-	// If MTLS is not enabled for this endpoint, skip S2A.
-	if !mtlsEndpointEnabledForS2A() {
+	// If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A.
+	if opts.DefaultMTLSEndpoint == "" || opts.Endpoint != "" {
 		return false
 	}
 	// If custom HTTP client is provided, skip S2A.
 	if opts.Client != nil {
 		return false
 	}
-	return true
+	// If directPath is enabled, skip S2A.
+	return !opts.EnableDirectPath && !opts.EnableDirectPathXds
 }
 
 func isGoogleS2AEnabled() bool {
diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md
index 43a1793848..af5ff37488 100644
--- a/vendor/cloud.google.com/go/iam/CHANGES.md
+++ b/vendor/cloud.google.com/go/iam/CHANGES.md
@@ -1,6 +1,20 @@
 # Changes
 
+## [1.1.8](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.7...iam/v1.1.8) (2024-05-01)
+
+
+### Bug Fixes
+
+* **iam:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
+
+## [1.1.7](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.6...iam/v1.1.7) (2024-03-14)
+
+
+### Bug Fixes
+
+* **iam:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+
 ## [1.1.6](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.5...iam/v1.1.6) (2024-01-30)
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
index b5243e6129..3fbf4530d0 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
@@ -14,8 +14,8 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.32.0
-// 	protoc        v4.23.2
+// 	protoc-gen-go v1.33.0
+// 	protoc        v4.25.3
 // source: google/iam/v1/iam_policy.proto
 
 package iampb
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
index 3f854fe496..29738ad1ce 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
@@ -14,8 +14,8 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.32.0
-// 	protoc        v4.23.2
+// 	protoc-gen-go v1.33.0
+// 	protoc        v4.25.3
 // source: google/iam/v1/options.proto
 
 package iampb
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
index dfc60661a3..a4e15741b6 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
@@ -14,8 +14,8 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.32.0
-// 	protoc        v4.23.2
+// 	protoc-gen-go v1.33.0
+// 	protoc        v4.25.3
 // source: google/iam/v1/policy.proto
 
 package iampb
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
index 9905b164f2..655fc5d824 100644
--- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
+++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
@@ -339,6 +339,16 @@
     "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
+  "cloud.google.com/go/backupdr/apiv1": {
+    "api_shortname": "backupdr",
+    "distribution_name": "cloud.google.com/go/backupdr/apiv1",
+    "description": "Backup and DR Service API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/backupdr/latest/apiv1",
+    "release_level": "preview",
+    "library_type": "GAPIC_AUTO"
+  },
   "cloud.google.com/go/baremetalsolution/apiv2": {
     "api_shortname": "baremetalsolution",
     "distribution_name": "cloud.google.com/go/baremetalsolution/apiv2",
@@ -649,6 +659,16 @@
     "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
+  "cloud.google.com/go/chat/apiv1": {
+    "api_shortname": "chat",
+    "distribution_name": "cloud.google.com/go/chat/apiv1",
+    "description": "Google Chat API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/chat/latest/apiv1",
+    "release_level": "preview",
+    "library_type": "GAPIC_AUTO"
+  },
   "cloud.google.com/go/cloudbuild/apiv1/v2": {
     "api_shortname": "cloudbuild",
     "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2",
@@ -756,7 +776,7 @@
     "language": "go",
     "client_library_type": "generated",
     "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/commerce/latest/consumer/procurement/apiv1",
-    "release_level": "preview",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/compute/apiv1": {
@@ -806,7 +826,7 @@
     "language": "go",
     "client_library_type": "generated",
     "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/config/latest/apiv1",
-    "release_level": "preview",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/contactcenterinsights/apiv1": {
@@ -1369,6 +1389,16 @@
     "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
+  "cloud.google.com/go/identitytoolkit/apiv2": {
+    "api_shortname": "identitytoolkit",
+    "distribution_name": "cloud.google.com/go/identitytoolkit/apiv2",
+    "description": "Identity Toolkit API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/identitytoolkit/latest/apiv2",
+    "release_level": "stable",
+    "library_type": "GAPIC_AUTO"
+  },
   "cloud.google.com/go/ids/apiv1": {
     "api_shortname": "ids",
     "distribution_name": "cloud.google.com/go/ids/apiv1",
@@ -1539,6 +1569,16 @@
     "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
+  "cloud.google.com/go/maps/routeoptimization/apiv1": {
+    "api_shortname": "routeoptimization",
+    "distribution_name": "cloud.google.com/go/maps/routeoptimization/apiv1",
+    "description": "Route Optimization API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routeoptimization/apiv1",
+    "release_level": "preview",
"library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/maps/routing/apiv2": { "api_shortname": "routes", "distribution_name": "cloud.google.com/go/maps/routing/apiv2", @@ -1549,6 +1589,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/maps/solar/apiv1": { + "api_shortname": "solar", + "distribution_name": "cloud.google.com/go/maps/solar/apiv1", + "description": "Solar API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/solar/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/mediatranslation/apiv1beta1": { "api_shortname": "mediatranslation", "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", @@ -1616,7 +1666,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/migrationcenter/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/apiv3/v2": { @@ -1656,7 +1706,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/netapp/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkconnectivity/apiv1": { @@ -2026,7 +2076,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/cluster/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcemanager/apiv2": { @@ -2169,6 +2219,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/security/publicca/apiv1": { + "api_shortname": "publicca", + "distribution_name": "cloud.google.com/go/security/publicca/apiv1", + "description": "Public Certificate Authority API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/security/publicca/apiv1beta1": { "api_shortname": "publicca", "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1", @@ -2286,7 +2346,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicehealth/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicemanagement/apiv1": { @@ -2329,6 +2389,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/conversions/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/conversions/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/conversions/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/shopping/merchant/inventories/apiv1beta": { "api_shortname": "merchantapi", "distribution_name": 
"cloud.google.com/go/shopping/merchant/inventories/apiv1beta", @@ -2339,6 +2409,46 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/lfp/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/lfp/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/lfp/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/notifications/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/notifications/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/notifications/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/quota/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/quota/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/quota/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/reports/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/reports/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/reports/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/spanner": { "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner", @@ -2432,7 +2542,7 @@ "cloud.google.com/go/storage/control/apiv2": { "api_shortname": "storage", "distribution_name": "cloud.google.com/go/storage/control/apiv2", - "description": "Cloud Storage API", + "description": "Storage Control API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/control/apiv2", @@ -2469,6 +2579,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/streetview/publish/apiv1": { + "api_shortname": "streetviewpublish", + "distribution_name": "cloud.google.com/go/streetview/publish/apiv1", + "description": "Street View Publish API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/streetview/latest/publish/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/support/apiv2": { "api_shortname": "cloudsupport", "distribution_name": "cloud.google.com/go/support/apiv2", @@ -2756,7 +2876,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workstations/apiv1beta": { 
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index 9cb6f8f17c..625ad4fbe7 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,25 @@ # Changes +## [1.40.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.1...storage/v1.40.0) (2024-03-29) + + +### Features + +* **storage:** Implement io.WriterTo in Reader ([#9659](https://github.com/googleapis/google-cloud-go/issues/9659)) ([8264a96](https://github.com/googleapis/google-cloud-go/commit/8264a962d1c21d52e8fca50af064c5535c3708d3)) +* **storage:** New storage control client ([#9631](https://github.com/googleapis/google-cloud-go/issues/9631)) ([1f4d279](https://github.com/googleapis/google-cloud-go/commit/1f4d27957743878976d6b4549cc02a5bb894d330)) + + +### Bug Fixes + +* **storage:** Retry errors from last recv on uploads ([#9616](https://github.com/googleapis/google-cloud-go/issues/9616)) ([b6574aa](https://github.com/googleapis/google-cloud-go/commit/b6574aa42ebad0532c2749b6ece879b932f95cb9)) +* **storage:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + + +### Performance Improvements + +* **storage:** Remove protobuf's copy of data on unmarshalling ([#9526](https://github.com/googleapis/google-cloud-go/issues/9526)) ([81281c0](https://github.com/googleapis/google-cloud-go/commit/81281c04e503fd83301baf88cc352c77f5d476ca)) + ## [1.39.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.0...storage/v1.39.1) (2024-03-11) diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index bdbf3acfea..e337213f03 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -19,6 +19,7 @@ import ( "encoding/base64" "errors" "fmt" + "hash/crc32" "io" "net/url" "os" @@ -27,6 +28,7 @@ import ( "cloud.google.com/go/internal/trace" gapic "cloud.google.com/go/storage/internal/apiv2" "cloud.google.com/go/storage/internal/apiv2/storagepb" + "github.com/golang/protobuf/proto" "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" @@ -34,8 +36,10 @@ import ( "google.golang.org/api/option/internaloption" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protowire" fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" ) @@ -902,12 +906,50 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec return r, nil } +// bytesCodec is a grpc codec which permits receiving messages as either +// protobuf messages, or as raw []bytes. +type bytesCodec struct { + encoding.Codec +} + +func (bytesCodec) Marshal(v any) ([]byte, error) { + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + } + return proto.Marshal(vv) +} + +func (bytesCodec) Unmarshal(data []byte, v any) error { + switch v := v.(type) { + case *[]byte: + // If gRPC could recycle the data []byte after unmarshaling (through + // buffer pools), we would need to make a copy here. 
+ *v = data + return nil + case proto.Message: + return proto.Unmarshal(data, v) + default: + return fmt.Errorf("can not unmarshal type %T", v) + } +} + +func (bytesCodec) Name() string { + // If this isn't "", then gRPC sets the content-subtype of the call to this + // value and we get errors. + return "" +} + func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader") defer func() { trace.EndSpan(ctx, err) }() s := callSettings(c.settings, opts...) + s.gax = append(s.gax, gax.WithGRPCOptions( + grpc.ForceCodec(bytesCodec{}), + )) + if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } @@ -923,6 +965,8 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange req.Generation = params.gen } + var databuf []byte + // Define a function that initiates a Read with offset and length, assuming // we have already read seen bytes. reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { @@ -957,12 +1001,23 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange return err } - msg, err = stream.Recv() + // Receive the message into databuf as a wire-encoded message so we can + // use a custom decoder to avoid an extra copy at the protobuf layer. + err := stream.RecvMsg(&databuf) // These types of errors show up on the Recv call, rather than the // initialization of the stream via ReadObject above. if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return ErrObjectNotExist } + if err != nil { + return err + } + // Use a custom decoder that uses protobuf unmarshalling for all + // fields except the checksummed data. + // Subsequent receives in Read calls will skip all protobuf + // unmarshalling and directly read the content from the gRPC []byte + // response, since only the first call will contain other fields. + msg, err = readFullObjectResponse(databuf) return err }, s.retry, s.idempotent) @@ -988,6 +1043,16 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange // This is the size of the entire object, even if only a range was requested. size := obj.GetSize() + // Only support checksums when reading an entire object, not a range. + var ( + wantCRC uint32 + checkCRC bool + ) + if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 { + wantCRC = checksums.GetCrc32C() + checkCRC = true + } + r = &Reader{ Attrs: ReaderObjectAttrs{ Size: size, @@ -1008,7 +1073,11 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange leftovers: msg.GetChecksummedData().GetContent(), settings: s, zeroRange: params.length == 0, + databuf: databuf, + wantCRC: wantCRC, + checkCRC: checkCRC, }, + checkCRC: checkCRC, } cr := msg.GetContentRange() @@ -1026,12 +1095,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange r.reader.Close() } - // Only support checksums when reading an entire object, not a range. 
- if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 { - r.wantCRC = checksums.GetCrc32C() - r.checkCRC = true - } - return r, nil } @@ -1406,14 +1469,37 @@ type gRPCReader struct { stream storagepb.Storage_ReadObjectClient reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) leftovers []byte + databuf []byte cancel context.CancelFunc settings *settings + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc +} + +// Update the running CRC with the data in the slice, if CRC checking was enabled. +func (r *gRPCReader) updateCRC(b []byte) { + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, b) + } +} + +// Checks whether the CRC matches at the conclusion of a read, if CRC checking was enabled. +func (r *gRPCReader) runCRCCheck() error { + if r.checkCRC && r.gotCRC != r.wantCRC { + return fmt.Errorf("storage: bad CRC on read: got %d, want %d", r.gotCRC, r.wantCRC) + } + return nil } // Read reads bytes into the user's buffer from an open gRPC stream. func (r *gRPCReader) Read(p []byte) (int, error) { - // The entire object has been read by this reader, return EOF. + // The entire object has been read by this reader, check the checksum if + // necessary and return EOF. if r.size == r.seen || r.zeroRange { + if err := r.runCRCCheck(); err != nil { + return 0, err + } return 0, io.EOF } @@ -1422,7 +1508,7 @@ func (r *gRPCReader) Read(p []byte) (int, error) { // using the same reader. One encounters an error and the stream is closed // and then reopened while the other routine attempts to read from it. if r.stream == nil { - return 0, fmt.Errorf("reader has been closed") + return 0, fmt.Errorf("storage: reader has been closed") } var n int @@ -1431,12 +1517,13 @@ func (r *gRPCReader) Read(p []byte) (int, error) { if len(r.leftovers) > 0 { n = copy(p, r.leftovers) r.seen += int64(n) + r.updateCRC(p[:n]) r.leftovers = r.leftovers[n:] return n, nil } // Attempt to Recv the next message on the stream. - msg, err := r.recv() + content, err := r.recv() if err != nil { return 0, err } @@ -1448,7 +1535,6 @@ func (r *gRPCReader) Read(p []byte) (int, error) { // present in the response here. // TODO: Figure out if we need to support decompressive transcoding // https://cloud.google.com/storage/docs/transcoding. - content := msg.GetChecksummedData().GetContent() n = copy(p[n:], content) leftover := len(content) - n if leftover > 0 { @@ -1457,10 +1543,78 @@ func (r *gRPCReader) Read(p []byte) (int, error) { r.leftovers = content[n:] } r.seen += int64(n) + r.updateCRC(p[:n]) return n, nil } +// WriteTo writes all the data requested by the Reader into w, implementing +// io.WriterTo. +func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) { + // The entire object has been read by this reader, check the checksum if + // necessary and return nil. + if r.size == r.seen || r.zeroRange { + if err := r.runCRCCheck(); err != nil { + return 0, err + } + return 0, nil + } + + // No stream to read from, either never initialized or Close was called. + // Note: There is a potential concurrency issue if multiple routines are + // using the same reader. One encounters an error and the stream is closed + // and then reopened while the other routine attempts to read from it. + if r.stream == nil { + return 0, fmt.Errorf("storage: reader has been closed") + } + + // Track bytes written during before call. 
+ var alreadySeen = r.seen + + // Write any leftovers to the stream. There will be some leftovers from the + // original NewRangeReader call. + if len(r.leftovers) > 0 { + // Write() will write the entire leftovers slice unless there is an error. + written, err := w.Write(r.leftovers) + r.seen += int64(written) + r.updateCRC(r.leftovers) + r.leftovers = nil + if err != nil { + return r.seen - alreadySeen, err + } + } + + // Loop and receive additional messages until the entire data is written. + for { + // Attempt to receive the next message on the stream. + // Will terminate with io.EOF once data has all come through. + // recv() handles stream reopening and retry logic so no need for retries here. + msg, err := r.recv() + if err != nil { + if err == io.EOF { + // We are done; check the checksum if necessary and return. + err = r.runCRCCheck() + } + return r.seen - alreadySeen, err + } + + // TODO: Determine if we need to capture incremental CRC32C for this + // chunk. The Object CRC32C checksum is captured when directed to read + // the entire Object. If directed to read a range, we may need to + // calculate the range's checksum for verification if the checksum is + // present in the response here. + // TODO: Figure out if we need to support decompressive transcoding + // https://cloud.google.com/storage/docs/transcoding. + written, err := w.Write(msg) + r.seen += int64(written) + r.updateCRC(msg) + if err != nil { + return r.seen - alreadySeen, err + } + } + +} + // Close cancels the read stream's context in order for it to be closed and // collected. func (r *gRPCReader) Close() error { @@ -1471,9 +1625,10 @@ func (r *gRPCReader) Close() error { return nil } -// recv attempts to Recv the next message on the stream. In the event -// that a retryable error is encountered, the stream will be closed, reopened, -// and Recv again. This will attempt to Recv until one of the following is true: +// recv attempts to Recv the next message on the stream and extract the object +// data that it contains. In the event that a retryable error is encountered, +// the stream will be closed, reopened, and RecvMsg again. +// This will attempt to Recv until one of the following is true: // // * Recv is successful // * A non-retryable error is encountered @@ -1481,8 +1636,9 @@ func (r *gRPCReader) Close() error { // // The last error received is the one that is returned, which could be from // an attempt to reopen the stream. -func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { - msg, err := r.stream.Recv() +func (r *gRPCReader) recv() ([]byte, error) { + err := r.stream.RecvMsg(&r.databuf) + var shouldRetry = ShouldRetry if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { shouldRetry = r.settings.retry.shouldRetry @@ -1492,10 +1648,195 @@ func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { // reopen the stream, but will backoff if further attempts are necessary. // Reopening the stream Recvs the first message, so if retrying is // successful, the next logical chunk will be returned. - msg, err = r.reopenStream() + msg, err := r.reopenStream() + return msg.GetChecksummedData().GetContent(), err + } + + if err != nil { + return nil, err + } + + return readObjectResponseContent(r.databuf) +} + +// ReadObjectResponse field and subfield numbers. 
+const ( + checksummedDataField = protowire.Number(1) + checksummedDataContentField = protowire.Number(1) + checksummedDataCRC32CField = protowire.Number(2) + objectChecksumsField = protowire.Number(2) + contentRangeField = protowire.Number(3) + metadataField = protowire.Number(4) +) + +// readObjectResponseContent returns the checksummed_data.content field of a +// ReadObjectResponse message, or an error if the message is invalid. +// This can be used on recvs of objects after the first recv, since only the +// first message will contain non-data fields. +func readObjectResponseContent(b []byte) ([]byte, error) { + checksummedData, err := readProtoBytes(b, checksummedDataField) + if err != nil { + return b, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", err) + } + content, err := readProtoBytes(checksummedData, checksummedDataContentField) + if err != nil { + return content, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", err) } - return msg, err + return content, nil +} + +// readFullObjectResponse returns the ReadObjectResponse that is encoded in the +// wire-encoded message buffer b, or an error if the message is invalid. +// This must be used on the first recv of an object as it may contain all fields +// of ReadObjectResponse, and we use or pass on those fields to the user. +// This function is essentially identical to proto.Unmarshal, except it aliases +// the data in the input []byte. If the proto library adds a feature to +// Unmarshal that does that, this function can be dropped. +func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) { + msg := &storagepb.ReadObjectResponse{} + + // Loop over the entire message, extracting fields as we go. This does not + // handle field concatenation, in which the contents of a single field + // are split across multiple protobuf tags. + off := 0 + for off < len(b) { + // Consume the next tag. This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + fieldNum, fieldType, fieldLength := protowire.ConsumeTag(b[off:]) + if fieldLength < 0 { + return nil, protowire.ParseError(fieldLength) + } + off += fieldLength + + // Unmarshal the field according to its type. Only fields that are not + // nil will be present. + switch { + case fieldNum == checksummedDataField && fieldType == protowire.BytesType: + // The ChecksummedData field was found. Initialize the struct. + msg.ChecksummedData = &storagepb.ChecksummedData{} + + // Get the bytes corresponding to the checksummed data. + fieldContent, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", protowire.ParseError(n)) + } + off += n + + // Get the nested fields. We need to do this manually as it contains + // the object content bytes. + contentOff := 0 + for contentOff < len(fieldContent) { + gotNum, gotTyp, n := protowire.ConsumeTag(fieldContent[contentOff:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + contentOff += n + + switch { + case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType: + // Get the content bytes. 
+ bytes, n := protowire.ConsumeBytes(fieldContent[contentOff:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", protowire.ParseError(n)) + } + msg.ChecksummedData.Content = bytes + contentOff += n + case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(fieldContent[contentOff:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %v", protowire.ParseError(n)) + } + msg.ChecksummedData.Crc32C = &v + contentOff += n + default: + n = protowire.ConsumeFieldValue(gotNum, gotTyp, fieldContent[contentOff:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + contentOff += n + } + } + case fieldNum == objectChecksumsField && fieldType == protowire.BytesType: + // The field was found. Initialize the struct. + msg.ObjectChecksums = &storagepb.ObjectChecksums{} + + // Get the bytes corresponding to the checksums. + bytes, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", protowire.ParseError(n)) + } + off += n + + // Unmarshal. + if err := proto.Unmarshal(bytes, msg.ObjectChecksums); err != nil { + return nil, err + } + case fieldNum == contentRangeField && fieldType == protowire.BytesType: + msg.ContentRange = &storagepb.ContentRange{} + + bytes, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", protowire.ParseError(n)) + } + off += n + + if err := proto.Unmarshal(bytes, msg.ContentRange); err != nil { + return nil, err + } + case fieldNum == metadataField && fieldType == protowire.BytesType: + msg.Metadata = &storagepb.Object{} + + bytes, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", protowire.ParseError(n)) + } + off += n + + if err := proto.Unmarshal(bytes, msg.Metadata); err != nil { + return nil, err + } + default: + fieldLength = protowire.ConsumeFieldValue(fieldNum, fieldType, b[off:]) + if fieldLength < 0 { + return nil, fmt.Errorf("default: %v", protowire.ParseError(fieldLength)) + } + off += fieldLength + } + } + + return msg, nil +} + +// readProtoBytes returns the contents of the protobuf field with number num +// and type bytes from a wire-encoded message. If the field cannot be found, +// the returned slice will be nil and no error will be returned. +// +// It does not handle field concatenation, in which the contents of a single field +// are split across multiple protobuf tags. Encoded data containing split fields +// of this form is technically permissable, but uncommon. +func readProtoBytes(b []byte, num protowire.Number) ([]byte, error) { + off := 0 + for off < len(b) { + gotNum, gotTyp, n := protowire.ConsumeTag(b[off:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + off += n + if gotNum == num && gotTyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + return b, nil + } + n = protowire.ConsumeFieldValue(gotNum, gotTyp, b[off:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + off += n + } + return nil, nil } // reopenStream "closes" the existing stream and attempts to reopen a stream and @@ -1630,6 +1971,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Send a request with as many bytes as possible. // Loop until all bytes are sent. 
+sendBytes: // label this loop so that we can use a continue statement from a nested block for { bytesNotYetSent := recvd - sent remainingDataFitsInSingleReq := bytesNotYetSent <= maxPerMessageWriteSize @@ -1707,10 +2049,6 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // we retry. w.stream = nil - // Drop the stream reference as a new one will need to be created if - // we can retry the upload - w.stream = nil - // Retriable errors mean we should start over and attempt to // resend the entire buffer via a new stream. // If not retriable, falling through will return the error received. @@ -1724,7 +2062,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Continue sending requests, opening a new stream and resending // any bytes not yet persisted as per QueryWriteStatus - continue + continue sendBytes } } if err != nil { @@ -1739,7 +2077,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Not done sending data, do not attempt to commit it yet, loop around // and send more data. if recvd-sent > 0 { - continue + continue sendBytes } // The buffer has been uploaded and there is still more data to be @@ -1770,7 +2108,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Drop the stream reference as a new one will need to be created. w.stream = nil - continue + continue sendBytes } if err != nil { return nil, 0, err @@ -1780,7 +2118,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Retry if not all bytes were persisted. writeOffset = resp.GetPersistedSize() sent = int(writeOffset) - int(start) - continue + continue sendBytes } } else { // If the object is done uploading, close the send stream to signal @@ -1800,6 +2138,15 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st var obj *storagepb.Object for obj == nil { resp, err := w.stream.Recv() + if shouldRetry(err) { + writeOffset, err = w.determineOffset(start) + if err != nil { + return nil, 0, err + } + sent = int(writeOffset) - int(start) + w.stream = nil + continue sendBytes + } if err != nil { return nil, 0, err } diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index e3e0d761bb..f75d93897d 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -19,6 +19,7 @@ import ( "encoding/base64" "errors" "fmt" + "hash/crc32" "io" "io/ioutil" "net/http" @@ -1218,9 +1219,12 @@ func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket strin } type httpReader struct { - body io.ReadCloser - seen int64 - reopen func(seen int64) (*http.Response, error) + body io.ReadCloser + seen int64 + reopen func(seen int64) (*http.Response, error) + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc } func (r *httpReader) Read(p []byte) (int, error) { @@ -1229,7 +1233,22 @@ func (r *httpReader) Read(p []byte) (int, error) { m, err := r.body.Read(p[n:]) n += m r.seen += int64(m) - if err == nil || err == io.EOF { + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) + } + if err == nil { + return n, nil + } + if err == io.EOF { + // Check CRC here. It would be natural to check it in Close, but + // everybody defers Close on the assumption that it doesn't return + // anything worth looking at. 
+ if r.checkCRC { + if r.gotCRC != r.wantCRC { + return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", + r.gotCRC, r.wantCRC) + } + } return n, err } // Read failed (likely due to connection issues), but we will try to reopen @@ -1435,11 +1454,12 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen Attrs: attrs, size: size, remain: remain, - wantCRC: crc, checkCRC: checkCRC, reader: &httpReader{ - reopen: reopen, - body: body, + reopen: reopen, + body: body, + wantCRC: crc, + checkCRC: checkCRC, }, }, nil } diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index 43604a2b17..1c52a3504b 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.39.1" +const Version = "1.40.0" diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 4673a68d07..0b228a6a76 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -198,9 +198,7 @@ var emptyBody = ioutil.NopCloser(strings.NewReader("")) type Reader struct { Attrs ReaderObjectAttrs seen, remain, size int64 - checkCRC bool // should we check the CRC? - wantCRC uint32 // the CRC32c value the server sent in the header - gotCRC uint32 // running crc + checkCRC bool // Did we check the CRC? This is now only used by tests. reader io.ReadCloser ctx context.Context @@ -218,17 +216,17 @@ func (r *Reader) Read(p []byte) (int, error) { if r.remain != -1 { r.remain -= int64(n) } - if r.checkCRC { - r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) - // Check CRC here. It would be natural to check it in Close, but - // everybody defers Close on the assumption that it doesn't return - // anything worth looking at. - if err == io.EOF { - if r.gotCRC != r.wantCRC { - return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", - r.gotCRC, r.wantCRC) - } - } + return n, err +} + +// WriteTo writes all the data from the Reader to w. Fulfills the io.WriterTo interface. +// This is called implicitly when calling io.Copy on a Reader. +func (r *Reader) WriteTo(w io.Writer) (int64, error) { + // This implicitly calls r.reader.WriteTo for gRPC only. JSON and XML don't have an + // implementation of WriteTo. 
+ n, err := io.Copy(w, r.reader) + if r.remain != -1 { + r.remain -= int64(n) } return n, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore new file mode 100644 index 0000000000..8cdb910365 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore @@ -0,0 +1,4 @@ +# live test artifacts +Dockerfile +k8s.yaml +sshkey* diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index f6749c0305..6d4b6feb86 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,35 @@ # Release History +## 1.6.0 (2024-06-10) + +### Features Added +* `NewOnBehalfOfCredentialWithClientAssertions` creates an on-behalf-of credential + that authenticates with client assertions such as federated credentials + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.4 +* Removed `AzurePipelinesCredential` and the persistent token caching API. + They will return in v1.7.0-beta.1 + +### Bugs Fixed +* Managed identity bug fixes + +## 1.6.0-beta.4 (2024-05-14) + +### Features Added +* `AzurePipelinesCredential` authenticates an Azure Pipeline service connection with + workload identity federation + +## 1.6.0-beta.3 (2024-04-09) + +### Breaking Changes +* `DefaultAzureCredential` now sends a probe request with no retries for IMDS managed identity + environments to avoid excessive retry delays when the IMDS endpoint is not available. This + should improve credential chain resolution for local development scenarios. + +### Bugs Fixed +* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances + ## 1.5.2 (2024-04-09) ### Bugs Fixed @@ -9,6 +39,28 @@ * Restored v1.4.0 error behavior for empty tenant IDs * Upgraded dependencies +## 1.6.0-beta.2 (2024-02-06) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.1 +* Replaced `ErrAuthenticationRequired` with `AuthenticationRequiredError`, a struct + type that carries the `TokenRequestOptions` passed to the `GetToken` call which + returned the error. + +### Bugs Fixed +* Fixed more cases in which credential chains like `DefaultAzureCredential` + should try their next credential after attempting managed identity + authentication in a Docker Desktop container + +### Other Changes +* `AzureCLICredential` uses the CLI's `expires_on` value for token expiration + +## 1.6.0-beta.1 (2024-01-17) + +### Features Added +* Restored persistent token caching API first added in v1.5.0-beta.1 +* Added `AzureCLICredentialOptions.Subscription` + ## 1.5.1 (2024-01-17) ### Bugs Fixed @@ -135,7 +187,7 @@ ### Features Added * By default, credentials set client capability "CP1" to enable support for - [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation). + [Continuous Access Evaluation (CAE)](https://learn.microsoft.com/entra/identity-platform/app-resilience-continuous-access-evaluation). This indicates to Microsoft Entra ID that your application can handle CAE claims challenges. You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true". 
* `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md index 1a64920230..4404be8244 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -1,6 +1,6 @@ # Migrating from autorest/adal to azidentity -`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. +`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. @@ -284,7 +284,7 @@ if err == nil { } ``` -Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/permissions-consent-overview). +Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/permissions-consent-overview). ## Use azidentity credentials with older packages diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index b6ad2d39f8..b5acff0e63 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -1,9 +1,9 @@ # Azure Identity Client Module for Go -The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. +The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. 
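To make the `TokenCredential` pattern above concrete, here is a minimal sketch of constructing a credential and requesting a token by hand; Azure SDK clients normally call `GetToken` themselves, and the management scope below is only an example:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential chains environment, workload identity, managed
	// identity, and Azure CLI credentials, trying each in order.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// SDK clients accept cred directly; calling GetToken by hand shows the shape.
	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"}, // example scope
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires:", tk.ExpiresOn)
}
```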
[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) -| [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/) +| [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity/) | [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) # Getting started @@ -30,7 +30,7 @@ When debugging and executing code locally, developers typically use their own ac #### Authenticating via the Azure CLI `DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user -signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. +signed in to the [Azure CLI](https://learn.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. When no default browser is available, `az login` will use the device code authentication flow. This can also be selected manually by running `az login --use-device-code`. @@ -69,14 +69,14 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. ## Managed Identity `DefaultAzureCredential` and `ManagedIdentityCredential` support -[managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) +[managed identity authentication](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview) in any hosting environment which supports managed identities, such as (this list is not exhaustive): -* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity) -* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) -* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization) -* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity) -* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity) -* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token) +* [Azure App Service](https://learn.microsoft.com/azure/app-service/overview-managed-identity) +* [Azure Arc](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) +* [Azure Cloud Shell](https://learn.microsoft.com/azure/cloud-shell/msi-authorization) +* [Azure Kubernetes Service](https://learn.microsoft.com/azure/aks/use-managed-identity) +* [Azure Service Fabric](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity) +* [Azure Virtual Machines](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token) ## Examples @@ -207,7 +207,7 @@ For more details, see the [token caching documentation](https://aka.ms/azsdk/go/ Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). 
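As a sketch of what that error handling can look like, the snippet below matches `azidentity.AuthenticationFailedError` with `errors.As`; the tenant, client, and secret values are placeholders, and the request is expected to fail:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// describeAuthError inspects a GetToken failure. AuthenticationFailedError
// carries the raw HTTP response from Microsoft Entra ID when one was received.
func describeAuthError(err error) {
	var authErr *azidentity.AuthenticationFailedError
	if errors.As(err, &authErr) && authErr.RawResponse != nil {
		fmt.Println("Entra ID returned HTTP", authErr.RawResponse.StatusCode)
		return
	}
	fmt.Println("authentication failed before a response was received:", err)
}

func main() {
	// Placeholder values: this credential is built only to demonstrate failure.
	cred, err := azidentity.NewClientSecretCredential("tenant", "client", "bogus-secret", nil)
	if err != nil {
		panic(err)
	}
	_, err = cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		describeAuthError(err)
	}
}
```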
-For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes). +For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes). ### Logging diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index c0d6601469..f9cc489433 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -45,7 +45,7 @@ With persistent disk token caching enabled, the library first determines if a va #### Example code -See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#pkg-overview) for code examples demonstrating how to configure persistent caching and access cached data. +See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data. ### Credentials supporting token caching diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 832c599eb9..3564e685e1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -58,7 +58,7 @@ This error contains several pieces of information: - __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. -- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes. +- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes. - __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures. @@ -97,17 +97,17 @@ azlog.SetEvents(azidentity.EventAuthentication) | Error Code | Issue | Mitigation | |---|---|---| -|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. 
Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).| ## Troubleshoot ClientCertificateCredential authentication issues | Error Code | Description | Mitigation | |---|---|---| -|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. 
To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-1-upload-a-certificate).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).| ## Troubleshoot UsernamePasswordCredential authentication issues @@ -123,20 +123,20 @@ azlog.SetEvents(azidentity.EventAuthentication) |Host Environment| | | |---|---|---| -|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)| -|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)| +|Azure Virtual Machines and Scale Sets|[Configuration](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)| +|Azure App Service and Azure Functions|[Configuration](https://learn.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)| |Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)| -|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)|| -|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)|| +|Azure Arc|[Configuration](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)|| +|Azure Service Fabric|[Configuration](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)|| ### Azure Virtual Machine managed identity | Error Message |Description| Mitigation | |---|---|---| -|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.

If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).|
+|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.<br><br>If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).|
 |The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`|
-|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|<br>  • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>  • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.<br>|
-|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|<br>  • Refer to the error message for more details on specific failures.<br>  • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>  • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.<br>|
+|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|<br>  • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>  • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.<br>|
+|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|<br>  • Refer to the error message for more details on specific failures.<br>  • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>  • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.<br>|
#### Verify IMDS is available on the VM @@ -152,7 +152,7 @@ curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://man | Error Message |Description| Mitigation | |---|---|---|
-|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|<br>  • Ensure the App Service is configured for managed identity as described in [App Service documentation](https://docs.microsoft.com/azure/app-service/overview-managed-identity).<br>  • Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.<br>|
+|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|<br>  • Ensure the App Service is configured for managed identity as described in [App Service documentation](https://learn.microsoft.com/azure/app-service/overview-managed-identity).<br>  • Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.<br>|
#### Verify the App Service managed identity endpoint is available @@ -177,8 +177,8 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio | Error Message |Description| Mitigation | |---|---|---|
-|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|<br>  • Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://docs.microsoft.com/cli/azure/install-azure-cli).<br>  • Validate the installation location is in the application's `PATH` environment variable.<br>|
-|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|<br>  • Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/authenticate-azure-cli).<br>  • Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.<br>|
+|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|<br>  • Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://learn.microsoft.com/cli/azure/install-azure-cli).<br>  • Validate the installation location is in the application's `PATH` environment variable.<br>|
+|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|<br>  • Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://learn.microsoft.com/cli/azure/authenticate-azure-cli).<br>  • Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.<br>
| #### Verify the Azure CLI can obtain tokens diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index 1be55a4bdd..bff0c44dac 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_98074050dc" + "Tag": "go/azidentity_087379b475" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index 43577ab3c5..b9976f5fed 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -35,9 +35,9 @@ type AzureCLICredentialOptions struct { // logged in account can access. AdditionallyAllowedTenants []string - // subscription is the name or ID of a subscription. Set this to acquire tokens for an account other + // Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other // than the Azure CLI's current account. - subscription string + Subscription string // TenantID identifies the tenant the credential should authenticate in. // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user. @@ -68,9 +68,9 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent if options != nil { cp = *options } - for _, r := range cp.subscription { + for _, r := range cp.Subscription { if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') { - return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.subscription) + return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.Subscription) } } if cp.TenantID != "" && !validTenantID(cp.TenantID) { @@ -97,7 +97,7 @@ func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequ } c.mu.Lock() defer c.mu.Unlock() - b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.subscription) + b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.Subscription) if err == nil { at, err = c.createAccessToken(b) } @@ -163,26 +163,21 @@ var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes [] func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { t := struct { - AccessToken string `json:"accessToken"` - Authority string `json:"_authority"` - ClientID string `json:"_clientId"` - ExpiresOn string `json:"expiresOn"` - IdentityProvider string `json:"identityProvider"` - IsMRRT bool `json:"isMRRT"` - RefreshToken string `json:"refreshToken"` - Resource string `json:"resource"` - TokenType string `json:"tokenType"` - UserID string `json:"userId"` + AccessToken string `json:"accessToken"` + Expires_On int64 `json:"expires_on"` + ExpiresOn string `json:"expiresOn"` }{} err := json.Unmarshal(tk, &t) if err != nil { return azcore.AccessToken{}, err } - // the Azure CLI's "expiresOn" is local time - exp, err := time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local) - if err != nil { - return azcore.AccessToken{}, fmt.Errorf("Error parsing token expiration time %q: %v", t.ExpiresOn, err) + exp := time.Unix(t.Expires_On, 0) + if t.Expires_On == 0 { + exp, err = 
time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("%s: error parsing token expiration time %q: %v", credNameAzureCLI, t.ExpiresOn, err) + } } converted := azcore.AccessToken{ diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go new file mode 100644 index 0000000000..2655543aee --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go @@ -0,0 +1,130 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +const ( + credNameAzurePipelines = "AzurePipelinesCredential" + oidcAPIVersion = "7.1" + systemAccessToken = "SYSTEM_ACCESSTOKEN" + systemOIDCRequestURI = "SYSTEM_OIDCREQUESTURI" +) + +// azurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See +// [Azure Pipelines documentation] for more information. +// +// [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/library/connect-to-azure?view=azure-devops#create-an-azure-resource-manager-service-connection-that-uses-workload-identity-federation +type azurePipelinesCredential struct { + connectionID, oidcURI, systemAccessToken string + cred *ClientAssertionCredential +} + +// azurePipelinesCredentialOptions contains optional parameters for AzurePipelinesCredential. +type azurePipelinesCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// newAzurePipelinesCredential is the constructor for AzurePipelinesCredential. In addition to its required arguments, +// it reads a security token for the running build, which is required to authenticate the service connection, from the +// environment variable SYSTEM_ACCESSTOKEN. See the [Azure Pipelines documentation] for an example showing how to set +// this variable in build job YAML. +// +// [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken +func newAzurePipelinesCredential(tenantID, clientID, serviceConnectionID string, options *azurePipelinesCredentialOptions) (*azurePipelinesCredential, error) { + if options == nil { + options = &azurePipelinesCredentialOptions{} + } + u := os.Getenv(systemOIDCRequestURI) + if u == "" { + return nil, fmt.Errorf("no value for environment variable %s. 
This should be set by Azure Pipelines", systemOIDCRequestURI) + } + sat := os.Getenv(systemAccessToken) + if sat == "" { + return nil, errors.New("no value for environment variable " + systemAccessToken) + } + a := azurePipelinesCredential{ + connectionID: serviceConnectionID, + oidcURI: u, + systemAccessToken: sat, + } + caco := ClientAssertionCredentialOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientAssertionCredential(tenantID, clientID, a.getAssertion, &caco) + if err != nil { + return nil, err + } + cred.client.name = credNameAzurePipelines + a.cred = cred + return &a, nil +} + +// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically. +func (a *azurePipelinesCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameAzurePipelines+"."+traceOpGetToken, a.cred.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := a.cred.GetToken(ctx, opts) + return tk, err +} + +func (a *azurePipelinesCredential) getAssertion(ctx context.Context) (string, error) { + url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID + url, err := runtime.EncodeQueryParams(url) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil) + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil) + } + req.Header.Set("Authorization", "Bearer "+a.systemAccessToken) + res, err := doForClient(a.cred.client.azClient, req) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil) + } + if res.StatusCode != http.StatusOK { + msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration" + // include the response because its body, if any, probably contains an error message. 
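For reference, the exchange `getAssertion` performs can be reproduced with plain `net/http`; a sketch under the assumption that the pipeline variables are populated (`SERVICE_CONNECTION_ID` is a hypothetical stand-in for the serviceConnectionId constructor argument):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Both SYSTEM_* variables are set by Azure Pipelines when the job maps
	// System.AccessToken; SERVICE_CONNECTION_ID is a hypothetical stand-in.
	uri := os.Getenv("SYSTEM_OIDCREQUESTURI") +
		"?api-version=7.1&serviceConnectionId=" + os.Getenv("SERVICE_CONNECTION_ID")
	req, err := http.NewRequest(http.MethodPost, uri, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("SYSTEM_ACCESSTOKEN"))
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, err := io.ReadAll(res.Body)
	if err != nil {
		panic(err)
	}
	var r struct {
		OIDCToken string `json:"oidcToken"` // field name per the hunk above
	}
	if err := json.Unmarshal(body, &r); err != nil {
		panic(err)
	}
	fmt.Println("assertion length:", len(r.OIDCToken))
}
```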
+ // OK responses aren't included with errors because they probably contain secrets + return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil) + } + b, err := runtime.Payload(res) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil) + } + var r struct { + OIDCToken string `json:"oidcToken"` + } + err = json.Unmarshal(b, &r) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil) + } + return r.OIDCToken, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go index dc855edf78..6c35a941b9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -86,7 +86,7 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token errs []error successfulCredential azcore.TokenCredential token azcore.AccessToken - unavailableErr *credentialUnavailableError + unavailableErr credentialUnavailable ) for _, cred := range c.sources { token, err = cred.GetToken(ctx, opts) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index d077682c5c..4cd8c51447 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -8,7 +8,7 @@ trigger: - release/* paths: include: - - sdk/azidentity/ + - sdk/azidentity/ pr: branches: @@ -19,17 +19,28 @@ pr: - release/* paths: include: - - sdk/azidentity/ + - sdk/azidentity/ -stages: -- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml - parameters: - RunLiveTests: true - UsePipelineProxy: false - ServiceDirectory: 'azidentity' - CloudConfig: - Public: - SubscriptionConfigurations: - - $(sub-config-azure-cloud-test-resources) - # Contains alternate tenant, AAD app and cert info for testing - - $(sub-config-identity-test-resources) +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + CloudConfig: + Public: + SubscriptionConfigurations: + - $(sub-config-azure-cloud-test-resources) + - $(sub-config-identity-test-resources) + EnvVars: + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + RunLiveTests: true + ServiceDirectory: azidentity + UsePipelineProxy: false + + ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}: + MatrixConfigs: + - Name: managed_identity_matrix + GenerateVMJobs: true + Path: sdk/azidentity/managed-identity-matrix.json + Selection: sparse + MatrixReplace: + - Pool=.*LINUXPOOL.*/azsdk-pool-mms-ubuntu-2204-identitymsi + - OSVmImage=.*LINUXNEXTVMIMAGE.*/azsdk-pool-mms-ubuntu-2204-1espt diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go index fc3df68eb1..b588750ef3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -23,7 +23,7 @@ const credNameAssertion = "ClientAssertionCredential" // the most common assertion scenario, authenticating a service principal with a certificate. 
See // [Microsoft Entra ID documentation] for details of the assertion format. // -// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/entra/identity-platform/certificate-credentials#assertion-format type ClientAssertionCredential struct { client *confidentialClient } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index 607533f486..80cd96b560 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -51,7 +51,8 @@ type ClientCertificateCredential struct { client *confidentialClient } -// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. +// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. See +// [ParseCertificates] for help loading a certificate. func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) { if len(certs) == 0 { return nil, errors.New("at least one certificate is required") @@ -86,8 +87,10 @@ func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy. return tk, err } -// ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. -// Pass nil for password if the private key isn't encrypted. This function can't decrypt keys in PEM format. +// ParseCertificates loads certificates and a private key, in PEM or PKCS#12 format, for use with [NewClientCertificateCredential]. +// Pass nil for password if the private key isn't encrypted. This function has limitations, for example it can't decrypt keys in +// PEM format or PKCS#12 certificates that use SHA256 for message authentication. If you encounter such limitations, consider +// using another module to load the certificate and private key. func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) { var blocks []*pem.Block var err error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 854267bdbf..3bd08c685f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -91,7 +91,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque } tro.TenantID = tenant } - client, mu, err := c.client(ctx, tro) + client, mu, err := c.client(tro) if err != nil { return azcore.AccessToken{}, err } @@ -109,7 +109,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque if err != nil { // We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code. // We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError. 
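A minimal sketch of the loading path the updated `ParseCertificates` doc comment describes, assuming an unencrypted PKCS#12 file at the hypothetical path `cert.pfx` and placeholder tenant/client IDs:

```go
package main

import (
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// cert.pfx is a placeholder path; pass a non-nil password for an
	// encrypted PKCS#12 file. PEM keys must be unencrypted, per the doc above.
	data, err := os.ReadFile("cert.pfx")
	if err != nil {
		log.Fatal(err)
	}
	certs, key, err := azidentity.ParseCertificates(data, nil)
	if err != nil {
		// fall back to another loader (e.g. software.sslmate.com/src/go-pkcs12)
		// when hitting the limitations described above
		log.Fatal(err)
	}
	// "tenant-id" and "client-id" are placeholders
	cred, err := azidentity.NewClientCertificateCredential("tenant-id", "client-id", certs, key, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = cred
}
```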
- var unavailableErr *credentialUnavailableError + var unavailableErr credentialUnavailable if !errors.As(err, &unavailableErr) { res := getResponseFromError(err) err = newAuthenticationFailedError(c.name, err.Error(), res, err) @@ -121,7 +121,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err } -func (c *confidentialClient) client(ctx context.Context, tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) { +func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) { c.clientMu.Lock() defer c.clientMu.Unlock() if tro.EnableCAE { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 35aeef8674..551d319946 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -8,10 +8,8 @@ package azidentity import ( "context" - "errors" "os" "strings" - "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" @@ -98,13 +96,13 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err}) } - o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions} + o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions, dac: true} if ID, ok := os.LookupEnv(azureClientID); ok { o.ID = ClientID(ID) } miCred, err := NewManagedIdentityCredential(o) if err == nil { - creds = append(creds, &timeoutWrapper{mic: miCred, timeout: time.Second}) + creds = append(creds, miCred) } else { errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error()) creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err}) @@ -158,51 +156,10 @@ type defaultCredentialErrorReporter struct { } func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - if _, ok := d.err.(*credentialUnavailableError); ok { + if _, ok := d.err.(credentialUnavailable); ok { return azcore.AccessToken{}, d.err } return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error()) } var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil) - -// timeoutWrapper prevents a potentially very long timeout when managed identity isn't available -type timeoutWrapper struct { - mic *ManagedIdentityCredential - // timeout applies to all auth attempts until one doesn't time out - timeout time.Duration -} - -// GetToken wraps DefaultAzureCredential's initial managed identity auth attempt with a short timeout -// because managed identity may not be available and connecting to IMDS can take several minutes to time out. 
-func (w *timeoutWrapper) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - var tk azcore.AccessToken - var err error - // no need to synchronize around this value because it's written only within ChainedTokenCredential's critical section - if w.timeout > 0 { - c, cancel := context.WithTimeout(ctx, w.timeout) - defer cancel() - tk, err = w.mic.GetToken(c, opts) - if isAuthFailedDueToContext(err) { - err = newCredentialUnavailableError(credNameManagedIdentity, "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information") - } else { - // some managed identity implementation is available, so don't apply the timeout to future calls - w.timeout = 0 - } - } else { - tk, err = w.mic.GetToken(ctx, opts) - } - return tk, err -} - -// unwraps nested AuthenticationFailedErrors to get the root error -func isAuthFailedDueToContext(err error) bool { - for { - var authFailedErr *AuthenticationFailedError - if !errors.As(err, &authFailedErr) { - break - } - err = authFailedErr.err - } - return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go index d8b952f532..be963d3a2a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go @@ -19,7 +19,7 @@ const cliTimeout = 10 * time.Second // the next credential in its chain (another developer credential). func unavailableIfInChain(err error, inDefaultChain bool) error { if err != nil && inDefaultChain { - var unavailableErr *credentialUnavailableError + var unavailableErr credentialUnavailable if !errors.As(err, &unavailableErr) { err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error()) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index 1b7a283703..cd30bedd5e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -34,8 +34,8 @@ type DeviceCodeCredentialOptions struct { ClientID string // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. - // When this option is true, [DeviceCodeCredential.GetToken] will return [ErrAuthenticationRequired] when user - // interaction is necessary to acquire a token. + // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // to acquire a token. 
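Callers construct `DefaultAzureCredential` exactly as before; the IMDS probe introduced in `managed_identity_client.go` below replaces the removed `timeoutWrapper` transparently. A minimal sketch:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// With this change the chain's first managed identity attempt probes
	// IMDS with a short timeout instead of racing a one-second wrapper.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	_, err = cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```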
disableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go index 42f84875e2..b30f5474f5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -57,6 +57,9 @@ type EnvironmentCredentialOptions struct { // // AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file. // +// Note that this credential uses [ParseCertificates] to load the certificate and key from the file. If this +// function isn't able to parse your certificate, use [ClientCertificateCredential] instead. +// // # User with username and password // // AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". @@ -121,7 +124,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme } certs, key, err := ParseCertificates(certData, password) if err != nil { - return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err) + return nil, fmt.Errorf("failed to parse %q due to error %q. This may be due to a limitation of this module's certificate loader. Consider calling NewClientCertificateCredential instead", certPath, err.Error()) } o := &ClientCertificateCredentialOptions{ AdditionallyAllowedTenants: additionalTenants, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index 335d2b7dcf..698650bbb6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -13,15 +13,12 @@ import ( "fmt" "net/http" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" ) -// errAuthenticationRequired indicates a credential's Authenticate method must be called to acquire a token -// because user interaction is required and the credential is configured not to automatically prompt the user. -var errAuthenticationRequired error = &credentialUnavailableError{"can't acquire a token without user interaction. Call Authenticate to interactively authenticate a user"} - // getResponseFromError retrieves the response carried by // an AuthenticationFailedError or MSAL CallErr, if any func getResponseFromError(err error) *http.Response { @@ -56,7 +53,7 @@ func (e *AuthenticationFailedError) Error() string { return e.credType + ": " + e.message } msg := &bytes.Buffer{} - fmt.Fprintf(msg, e.credType+" authentication failed\n") + fmt.Fprintf(msg, "%s authentication failed. 
%s\n", e.credType, e.message) if e.RawResponse.Request != nil { fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) } else { @@ -110,8 +107,34 @@ func (*AuthenticationFailedError) NonRetriable() { var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) -// credentialUnavailableError indicates a credential can't attempt authentication because it lacks required -// data or state +// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token +// because the credential requires user interaction and is configured not to request it automatically. +type authenticationRequiredError struct { + credentialUnavailableError + + // TokenRequestOptions for the required token. Pass this to the credential's Authenticate method. + TokenRequestOptions policy.TokenRequestOptions +} + +func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error { + return &authenticationRequiredError{ + credentialUnavailableError: credentialUnavailableError{ + credType + " can't acquire a token without user interaction. Call Authenticate to authenticate a user interactively", + }, + TokenRequestOptions: tro, + } +} + +var ( + _ credentialUnavailable = (*authenticationRequiredError)(nil) + _ errorinfo.NonRetriable = (*authenticationRequiredError)(nil) +) + +type credentialUnavailable interface { + error + credentialUnavailable() +} + type credentialUnavailableError struct { message string } @@ -135,6 +158,11 @@ func (e *credentialUnavailableError) Error() string { } // NonRetriable is a marker method indicating this error should not be retried. It has no implementation. -func (e *credentialUnavailableError) NonRetriable() {} +func (*credentialUnavailableError) NonRetriable() {} + +func (*credentialUnavailableError) credentialUnavailable() {} -var _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil) +var ( + _ credentialUnavailable = (*credentialUnavailableError)(nil) + _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil) +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum index 65bcba7dfe..c592f283b6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum @@ -3,12 +3,20 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9an github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -16,14 +24,19 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -35,7 +48,13 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index bd82969837..056785a8a3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -33,8 +33,8 @@ type InteractiveBrowserCredentialOptions struct { ClientID string // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. - // When this option is true, [InteractiveBrowserCredential.GetToken] will return [ErrAuthenticationRequired] when - // user interaction is necessary to acquire a token. + // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // to acquire a token. disableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json new file mode 100644 index 0000000000..1c3791777a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json @@ -0,0 +1,17 @@ +{ + "include": [ + { + "Agent": { + "msi_image": { + "ArmTemplateParameters": "@{deployResources = $true}", + "OSVmImage": "env:LINUXNEXTVMIMAGE", + "Pool": "env:LINUXPOOL" + } + }, + "GoVersion": [ + "1.22.1" + ], + "IDENTITY_IMDS_AVAILABLE": "1" + } + ] +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index d129a1e91c..6122cc7005 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -14,13 +14,15 @@ import ( "net/http" "net/url" "os" + "path/filepath" + "runtime" "strconv" "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" @@ -44,6 +46,8 @@ const ( serviceFabricAPIVersion = "2019-07-01-preview" ) +var imdsProbeTimeout = time.Second + type msiType int const ( @@ -55,13 +59,28 @@ const ( msiTypeServiceFabric ) -// managedIdentityClient provides the base for authenticating in managed identity environments -// This type includes an runtime.Pipeline and TokenCredentialOptions. 
type managedIdentityClient struct { - azClient *azcore.Client - msiType msiType - endpoint string - id ManagedIDKind + azClient *azcore.Client + endpoint string + id ManagedIDKind + msiType msiType + probeIMDS bool +} + +// arcKeyDirectory returns the directory expected to contain Azure Arc keys +var arcKeyDirectory = func() (string, error) { + switch runtime.GOOS { + case "linux": + return "/var/opt/azcmagent/tokens", nil + case "windows": + pd := os.Getenv("ProgramData") + if pd == "" { + return "", errors.New("environment variable ProgramData has no value") + } + return filepath.Join(pd, "AzureConnectedMachineAgent", "Tokens"), nil + default: + return "", fmt.Errorf("unsupported OS %q", runtime.GOOS) + } } type wrappedNumber json.Number @@ -88,7 +107,7 @@ func setIMDSRetryOptionDefaults(o *policy.RetryOptions) { if o.StatusCodes == nil { o.StatusCodes = []int{ // IMDS docs recommend retrying 404, 410, 429 and 5xx - // https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling + // https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token#error-handling http.StatusNotFound, // 404 http.StatusGone, // 410 http.StatusTooManyRequests, // 429 @@ -147,11 +166,12 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag c.msiType = msiTypeCloudShell } } else { + c.probeIMDS = options.dac setIMDSRetryOptionDefaults(&cp.Retry) } - client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ - Tracing: runtime.TracingOptions{ + client, err := azcore.NewClient(module, version, azruntime.PipelineOptions{ + Tracing: azruntime.TracingOptions{ Namespace: traceNamespace, }, }, &cp) @@ -180,6 +200,27 @@ func (c *managedIdentityClient) provideToken(ctx context.Context, params confide // authenticate acquires an access token func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { + // no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client, + // and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block + if c.probeIMDS { + cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout) + defer cancel() + cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) + req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint) + if err == nil { + _, err = c.azClient.Pipeline().Do(req) + } + if err != nil { + msg := err.Error() + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + msg = "managed identity timed out. 
See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information" + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) + } + // send normal token requests from now on because something responded + c.probeIMDS = false + } + msg, err := c.createAuthRequest(ctx, id, scopes) if err != nil { return azcore.AccessToken{}, err @@ -190,7 +231,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) } - if runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { return c.createAccessToken(resp) } @@ -201,15 +242,15 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) } msg := "failed to authenticate a system assigned identity" - if body, err := runtime.Payload(resp); err == nil && len(body) > 0 { + if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 { msg += fmt.Sprintf(". The endpoint responded with %s", body) } return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) case http.StatusForbidden: // Docker Desktop runs a proxy that responds 403 to IMDS token requests. If we get that response, // we return credentialUnavailableError so credential chains continue to their next credential - body, err := runtime.Payload(resp) - if err == nil && strings.Contains(string(body), "A socket operation was attempted to an unreachable network") { + body, err := azruntime.Payload(resp) + if err == nil && strings.Contains(string(body), "unreachable") { return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body))) } } @@ -226,7 +267,7 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string }{} - if err := runtime.UnmarshalAsJSON(res, &value); err != nil { + if err := azruntime.UnmarshalAsJSON(res, &value); err != nil { return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) } if value.ExpiresIn != "" { @@ -276,7 +317,7 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage } func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -296,7 +337,7 @@ func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id Ma } func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -316,7 +357,7 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, } func (c 
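The probe pattern is easy to study in isolation; a minimal sketch of bounded-timeout endpoint detection, independent of azidentity (the address shown is the well-known IMDS one, used here only as an example):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// probe reports whether anything answers at url within timeout.
// Any response, even an error status, counts as "available" —
// the same rule the IMDS probe above applies.
func probe(url string, timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return false
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return false
	}
	res.Body.Close()
	return true
}

func main() {
	fmt.Println(probe("http://169.254.169.254/metadata/identity/oauth2/token", time.Second))
}
```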
*managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -339,7 +380,7 @@ func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id } func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -362,7 +403,7 @@ func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Conte func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) { // create the request to retreive the secret key challenge provided by the HIMDS service - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return "", err } @@ -384,22 +425,36 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour } header := response.Header.Get("WWW-Authenticate") if len(header) == 0 { - return "", errors.New("did not receive a value from WWW-Authenticate header") + return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil) } // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key - pos := strings.LastIndex(header, "=") - if pos == -1 { - return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header) + _, p, found := strings.Cut(header, "=") + if !found { + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil) + } + expected, err := arcKeyDirectory() + if err != nil { + return "", err + } + if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") { + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil) + } + f, err := os.Stat(p) + if err != nil { + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil) + } + if s := f.Size(); s > 4096 { + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil) } - key, err := os.ReadFile(header[pos+1:]) + key, err := os.ReadFile(p) if err != nil { - return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil) } return string(key), nil } func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -421,7 +476,7 @@ func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, i } func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - 
request, err := runtime.NewRequest(ctx, http.MethodPost, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index dcd278befa..13c043d8e0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -64,12 +64,19 @@ type ManagedIdentityCredentialOptions struct { // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that // some platforms don't accept resource IDs. ID ManagedIDKind + + // dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have + // configuration for a specific managed identity API, the credential tries to determine whether IMDS is available before + // sending its first token request. It does this by sending a malformed request with a short timeout. Any response to that + // request is taken to mean IMDS is available, in which case the credential will send ordinary token requests thereafter + // with no special timeout. The purpose of this behavior is to prevent a very long timeout when IMDS isn't available. + dac bool } // ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. // This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a // user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities: -// https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { client *confidentialClient mic *managedIdentityClient diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go index 5e67cf0214..9dcc82f013 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -10,6 +10,7 @@ import ( "context" "crypto" "crypto/x509" + "errors" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" @@ -24,7 +25,7 @@ const credNameOBO = "OnBehalfOfCredential" // is not an interactive authentication flow, an application using it must have admin consent for any delegated // permissions before requesting tokens for them. See [Microsoft Entra ID documentation] for more details. 
// -// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/entra/identity-platform/v2-oauth2-on-behalf-of-flow type OnBehalfOfCredential struct { client *confidentialClient } @@ -60,6 +61,19 @@ func NewOnBehalfOfCredentialWithCertificate(tenantID, clientID, userAssertion st return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) } +// NewOnBehalfOfCredentialWithClientAssertions constructs an OnBehalfOfCredential that authenticates with client assertions. +// userAssertion is the user's access token for the application. The getAssertion function should return client assertions +// that authenticate the application to Microsoft Entra ID, such as federated credentials. +func NewOnBehalfOfCredentialWithClientAssertions(tenantID, clientID, userAssertion string, getAssertion func(context.Context) (string, error), options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion can't be nil. It must be a function that returns client assertions") + } + cred := confidential.NewCredFromAssertionCallback(func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }) + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + // NewOnBehalfOfCredentialWithSecret constructs an OnBehalfOfCredential that authenticates with a client secret. func NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret string, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { cred, err := confidential.NewCredFromSecret(clientSecret) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index 63c31190d1..b3d22dbf3c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -152,7 +152,7 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti return p.token(ar, err) } if p.opts.DisableAutomaticAuthentication { - return azcore.AccessToken{}, errAuthenticationRequired + return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro) } at, err := p.reqToken(ctx, client, tro) if err == nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 new file mode 100644 index 0000000000..a69bbce34c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -0,0 +1,112 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# IMPORTANT: Do not invoke this file directly. Please instead run eng/common/TestResources/New-TestResources.ps1 from the repository root. 
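A minimal sketch of the new `NewOnBehalfOfCredentialWithClientAssertions` constructor; the tenant/client IDs, user assertion, and the assertion returned by the callback are all placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	getAssertion := func(ctx context.Context) (string, error) {
		// placeholder: return a federated client assertion here, e.g. a
		// token read from a workload identity token file
		return "client-assertion", nil
	}
	cred, err := azidentity.NewOnBehalfOfCredentialWithClientAssertions(
		"tenant-id", "client-id", "user-assertion", getAssertion, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = cred
}
```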
+ +param ( + [hashtable] $AdditionalParameters = @{}, + [hashtable] $DeploymentOutputs +) + +$ErrorActionPreference = 'Stop' +$PSNativeCommandUseErrorActionPreference = $true + +if ($CI) { + if (!$AdditionalParameters['deployResources']) { + Write-Host "Skipping post-provisioning script because resources weren't deployed" + return + } + az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID'] + az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID'] +} + +Write-Host "Building container" +$image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test" +Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @" +FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder +ENV GOARCH=amd64 GOWORK=off +COPY . /azidentity +WORKDIR /azidentity/testdata/managed-id-test +RUN go mod tidy +RUN go build -o /build/managed-id-test . +RUN GOOS=windows go build -o /build/managed-id-test.exe . + +FROM mcr.microsoft.com/mirror/docker/library/alpine:3.16 +RUN apk add gcompat +COPY --from=builder /build/* . +RUN chmod +x managed-id-test +CMD ["./managed-id-test"] +"@ +# build from sdk/azidentity because we need that dir in the context (because the test app uses local azidentity) +docker build -t $image "$PSScriptRoot" +az acr login -n $DeploymentOutputs['AZIDENTITY_ACR_NAME'] +docker push $image + +$rg = $DeploymentOutputs['AZIDENTITY_RESOURCE_GROUP'] + +# ACI is easier to provision here than in the bicep file because the image isn't available before now +Write-Host "Deploying Azure Container Instance" +$aciName = "azidentity-test" +az container create -g $rg -n $aciName --image $image ` + --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + --role "Storage Blob Data Reader" ` + --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) ` + -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) ` + AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + FUNCTIONS_CUSTOMHANDLER_PORT=80 +Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName" + +# Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip +Write-Host "Deploying to Azure Functions" +$container = docker create $image +docker cp ${container}:managed-id-test.exe "$PSScriptRoot/testdata/managed-id-test/" +docker rm -v $container +Compress-Archive -Path "$PSScriptRoot/testdata/managed-id-test/*" -DestinationPath func.zip -Force +az functionapp deploy -g $rg -n $DeploymentOutputs['AZIDENTITY_FUNCTION_NAME'] --src-path func.zip --type zip + +Write-Host "Creating federated identity" +$aksName = $DeploymentOutputs['AZIDENTITY_AKS_NAME'] +$idName = $DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME'] +$issuer = az aks show -g $rg -n $aksName --query "oidcIssuerProfile.issuerUrl" -otsv +$podName = "azidentity-test" +$serviceAccountName = "workload-identity-sa" +az identity federated-credential create -g $rg --identity-name $idName --issuer $issuer --name $idName --subject system:serviceaccount:default:$serviceAccountName +Write-Host "Deploying to AKS" +az aks get-credentials -g $rg -n $aksName +az aks update --attach-acr 
$DeploymentOutputs['AZIDENTITY_ACR_NAME'] -g $rg -n $aksName +Set-Content -Path "$PSScriptRoot/k8s.yaml" -Value @" +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + azure.workload.identity/client-id: $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) + name: $serviceAccountName + namespace: default +--- +apiVersion: v1 +kind: Pod +metadata: + name: $podName + namespace: default + labels: + app: $podName + azure.workload.identity/use: "true" +spec: + serviceAccountName: $serviceAccountName + containers: + - name: $podName + image: $image + env: + - name: AZIDENTITY_STORAGE_NAME + value: $($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) + - name: AZIDENTITY_USE_WORKLOAD_IDENTITY + value: "true" + - name: FUNCTIONS_CUSTOMHANDLER_PORT + value: "80" + nodeSelector: + kubernetes.io/os: linux +"@ +kubectl apply -f "$PSScriptRoot/k8s.yaml" +Write-Host "##vso[task.setvariable variable=AZIDENTITY_POD_NAME;]$podName" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 index fe0183adde..58766d0a02 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 @@ -1,36 +1,44 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# IMPORTANT: Do not invoke this file directly. Please instead run eng/common/TestResources/New-TestResources.ps1 from the repository root. + [CmdletBinding(SupportsShouldProcess = $true, ConfirmImpact = 'Medium')] param ( + [hashtable] $AdditionalParameters = @{}, + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). [Parameter(ValueFromRemainingArguments = $true)] $RemainingArguments ) +if (-not (Test-Path "$PSScriptRoot/sshkey.pub")) { + ssh-keygen -t rsa -b 4096 -f "$PSScriptRoot/sshkey" -N '' -C '' +} +$templateFileParameters['sshPubKey'] = Get-Content "$PSScriptRoot/sshkey.pub" + if (!$CI) { # TODO: Remove this once auto-cloud config downloads are supported locally Write-Host "Skipping cert setup in local testing mode" return } -if ($EnvironmentVariables -eq $null -or $EnvironmentVariables.Count -eq 0) { +if ($null -eq $EnvironmentVariables -or $EnvironmentVariables.Count -eq 0) { throw "EnvironmentVariables must be set in the calling script New-TestResources.ps1" } $tmp = $env:TEMP ? 
$env:TEMP : [System.IO.Path]::GetTempPath() $pfxPath = Join-Path $tmp "test.pfx" $pemPath = Join-Path $tmp "test.pem" -$sniPath = Join-Path $tmp "testsni.pfx" -Write-Host "Creating identity test files: $pfxPath $pemPath $sniPath" +Write-Host "Creating identity test files: $pfxPath $pemPath" [System.Convert]::FromBase64String($EnvironmentVariables['PFX_CONTENTS']) | Set-Content -Path $pfxPath -AsByteStream Set-Content -Path $pemPath -Value $EnvironmentVariables['PEM_CONTENTS'] -[System.Convert]::FromBase64String($EnvironmentVariables['SNI_CONTENTS']) | Set-Content -Path $sniPath -AsByteStream # Set for pipeline Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PFX;]$pfxPath" Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PEM;]$pemPath" -Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_SNI;]$sniPath" # Set for local $env:IDENTITY_SP_CERT_PFX = $pfxPath $env:IDENTITY_SP_CERT_PEM = $pemPath -$env:IDENTITY_SP_CERT_SNI = $sniPath diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep index b3490d3b50..2a21652930 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep @@ -1 +1,219 @@ -param baseName string +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +@description('Kubernetes cluster admin user name.') +param adminUser string = 'azureuser' + +@minLength(6) +@maxLength(23) +@description('The base resource name.') +param baseName string = resourceGroup().name + +@description('Whether to deploy resources. When set to false, this file deploys nothing.') +param deployResources bool = false + +param sshPubKey string = '' + +@description('The location of the resource. By default, this is the same as the resource group.') +param location string = resourceGroup().location + +// https://learn.microsoft.com/azure/role-based-access-control/built-in-roles +var acrPull = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') +var blobReader = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1') + +resource sa 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) { + kind: 'StorageV2' + location: location + name: 'sa${uniqueString(baseName)}' + properties: { + accessTier: 'Hot' + } + sku: { + name: 'Standard_LRS' + } +} + +resource saUserAssigned 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) { + kind: 'StorageV2' + location: location + name: 'sa2${uniqueString(baseName)}' + properties: { + accessTier: 'Hot' + } + sku: { + name: 'Standard_LRS' + } +} + +resource usermgdid 'Microsoft.ManagedIdentity/userAssignedIdentities@2018-11-30' = if (deployResources) { + location: location + name: baseName +} + +resource acrPullContainerInstance 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + name: guid(resourceGroup().id, acrPull, 'containerInstance') + properties: { + principalId: deployResources ? 
usermgdid.properties.principalId : '' + principalType: 'ServicePrincipal' + roleDefinitionId: acrPull + } + scope: containerRegistry +} + +resource blobRoleUserAssigned 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + scope: saUserAssigned + name: guid(resourceGroup().id, blobReader, usermgdid.id) + properties: { + principalId: deployResources ? usermgdid.properties.principalId : '' + principalType: 'ServicePrincipal' + roleDefinitionId: blobReader + } +} + +resource blobRoleFunc 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + name: guid(resourceGroup().id, blobReader, 'azfunc') + properties: { + principalId: deployResources ? azfunc.identity.principalId : '' + roleDefinitionId: blobReader + principalType: 'ServicePrincipal' + } + scope: sa +} + +resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-01-01-preview' = if (deployResources) { + location: location + name: uniqueString(resourceGroup().id) + properties: { + adminUserEnabled: true + } + sku: { + name: 'Basic' + } +} + +resource farm 'Microsoft.Web/serverfarms@2021-03-01' = if (deployResources) { + kind: 'app' + location: location + name: '${baseName}_asp' + properties: {} + sku: { + capacity: 1 + family: 'B' + name: 'B1' + size: 'B1' + tier: 'Basic' + } +} + +resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) { + identity: { + type: 'SystemAssigned, UserAssigned' + userAssignedIdentities: { + '${deployResources ? usermgdid.id : ''}': {} + } + } + kind: 'functionapp' + location: location + name: '${baseName}func' + properties: { + enabled: true + httpsOnly: true + keyVaultReferenceIdentity: 'SystemAssigned' + serverFarmId: farm.id + siteConfig: { + alwaysOn: true + appSettings: [ + { + name: 'AZIDENTITY_STORAGE_NAME' + value: deployResources ? sa.name : null + } + { + name: 'AZIDENTITY_STORAGE_NAME_USER_ASSIGNED' + value: deployResources ? saUserAssigned.name : null + } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY' + value: deployResources ? usermgdid.id : null + } + { + name: 'AzureWebJobsStorage' + value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}' + } + { + name: 'FUNCTIONS_EXTENSION_VERSION' + value: '~4' + } + { + name: 'FUNCTIONS_WORKER_RUNTIME' + value: 'custom' + } + { + name: 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING' + value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? 
sa.listKeys().keys[0].value : ''}'
+        }
+        {
+          name: 'WEBSITE_CONTENTSHARE'
+          value: toLower('${baseName}-func')
+        }
+      ]
+      http20Enabled: true
+      minTlsVersion: '1.2'
+    }
+  }
+}
+
+resource aks 'Microsoft.ContainerService/managedClusters@2023-06-01' = if (deployResources) {
+  name: baseName
+  location: location
+  identity: {
+    type: 'SystemAssigned'
+  }
+  properties: {
+    agentPoolProfiles: [
+      {
+        count: 1
+        enableAutoScaling: false
+        kubeletDiskType: 'OS'
+        mode: 'System'
+        name: 'agentpool'
+        osDiskSizeGB: 128
+        osDiskType: 'Managed'
+        osSKU: 'Ubuntu'
+        osType: 'Linux'
+        type: 'VirtualMachineScaleSets'
+        vmSize: 'Standard_D2s_v3'
+      }
+    ]
+    dnsPrefix: 'identitytest'
+    enableRBAC: true
+    linuxProfile: {
+      adminUsername: adminUser
+      ssh: {
+        publicKeys: [
+          {
+            keyData: sshPubKey
+          }
+        ]
+      }
+    }
+    oidcIssuerProfile: {
+      enabled: true
+    }
+    securityProfile: {
+      workloadIdentity: {
+        enabled: true
+      }
+    }
+  }
+}
+
+output AZIDENTITY_ACR_LOGIN_SERVER string = deployResources ? containerRegistry.properties.loginServer : ''
+output AZIDENTITY_ACR_NAME string = deployResources ? containerRegistry.name : ''
+output AZIDENTITY_AKS_NAME string = deployResources ? aks.name : ''
+output AZIDENTITY_FUNCTION_NAME string = deployResources ? azfunc.name : ''
+output AZIDENTITY_STORAGE_ID string = deployResources ? sa.id : ''
+output AZIDENTITY_STORAGE_NAME string = deployResources ? sa.name : ''
+output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAssigned.name : ''
+output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : ''
+output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : ''
+output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : ''
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
index 9b9d7ae0d2..459ef64c6f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
@@ -14,5 +14,5 @@ const (
 	module = "github.com/Azure/azure-sdk-for-go/sdk/" + component
 
 	// Version is the semantic version (see http://semver.org) of this module.
-	version = "v1.5.2"
+	version = "v1.6.0"
 )
diff --git a/vendor/github.com/VictoriaMetrics/fastcache/fastcache.go b/vendor/github.com/VictoriaMetrics/fastcache/fastcache.go
index 092ba37193..046af6cebb 100644
--- a/vendor/github.com/VictoriaMetrics/fastcache/fastcache.go
+++ b/vendor/github.com/VictoriaMetrics/fastcache/fastcache.go
@@ -272,13 +272,27 @@ func (b *bucket) cleanLocked() {
 	bGen := b.gen & ((1 << genSizeBits) - 1)
 	bIdx := b.idx
 	bm := b.m
-	for k, v := range bm {
+	newItems := 0
+	for _, v := range bm {
 		gen := v >> bucketSizeBits
 		idx := v & ((1 << bucketSizeBits) - 1)
 		if (gen+1 == bGen || gen == maxGen && bGen == 1) && idx >= bIdx || gen == bGen && idx < bIdx {
-			continue
+			newItems++
 		}
-		delete(bm, k)
+	}
+	if newItems < len(bm) {
+		// Re-create b.m with valid items, which weren't expired yet, instead of deleting expired items from b.m.
+		// This should reduce memory fragmentation and the number of Go objects behind b.m.
+ // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5379 + bmNew := make(map[uint64]uint64, newItems) + for k, v := range bm { + gen := v >> bucketSizeBits + idx := v & ((1 << bucketSizeBits) - 1) + if (gen+1 == bGen || gen == maxGen && bGen == 1) && idx >= bIdx || gen == bGen && idx < bIdx { + bmNew[k] = v + } + } + b.m = bmNew } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go index c07f6731ea..eeb3bc0c5a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go @@ -8,12 +8,12 @@ import ( // based on some key. The datastructure makes use of a read write // mutex to enable asynchronous use. type EndpointCache struct { - endpoints syncMap - endpointLimit int64 // size is used to count the number elements in the cache. // The atomic package is used to ensure this size is accurate when // using multiple goroutines. - size int64 + size int64 + endpoints syncMap + endpointLimit int64 } // NewEndpointCache will return a newly initialized cache with a limit diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 6db2a79e82..3448358f39 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -74,7 +74,9 @@ const ( ) // AWS ISOE (Europe) partition's regions. -const () +const ( + EuIsoeWest1RegionID = "eu-isoe-west-1" // EU ISOE West. +) // AWS ISOF partition's regions. const () @@ -244,13 +246,6 @@ var awsPartition = partition{ }, }, Services: services{ - "a4b": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "access-analyzer": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -4878,6 +4873,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "bedrock-ca-central-1", + }: endpoint{ + Hostname: "bedrock.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-eu-central-1", }: endpoint{ @@ -4894,6 +4897,14 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + endpointKey{ + Region: "bedrock-eu-west-2", + }: endpoint{ + Hostname: "bedrock.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, endpointKey{ Region: "bedrock-eu-west-3", }: endpoint{ @@ -4950,6 +4961,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "bedrock-runtime-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-runtime-eu-central-1", }: endpoint{ @@ -4966,6 +4985,14 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + endpointKey{ + Region: "bedrock-runtime-eu-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, endpointKey{ Region: "bedrock-runtime-eu-west-3", }: endpoint{ @@ -4990,6 +5017,14 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "bedrock-runtime-sa-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "bedrock-runtime-us-east-1", }: endpoint{ @@ -5006,6 +5041,14 @@ var 
awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "bedrock-sa-east-1", + }: endpoint{ + Hostname: "bedrock.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "bedrock-us-east-1", }: endpoint{ @@ -5022,15 +5065,24 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -5055,6 +5107,9 @@ var awsPartition = partition{ }, "braket": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -5085,6 +5140,12 @@ var awsPartition = partition{ }, "cases": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5299,69 +5360,157 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: 
"me-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, }, }, "cloudcontrolapi": service{ @@ -9266,9 +9415,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9293,6 +9454,24 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -15441,13 +15620,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "honeycode": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "iam": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -15570,6 +15742,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17474,12 +17649,27 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -24101,6 +24291,9 @@ var awsPartition = partition{ }, "quicksight": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -24116,15 +24309,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "api", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: 
"eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -25172,9 +25377,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -25229,6 +25440,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -27256,6 +27473,55 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "s3-control.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -31806,6 +32072,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -33729,6 +34013,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -33738,6 +34025,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -37318,6 +37608,9 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "resource-groups": service{ @@ -45676,42 +45969,12 @@ var awsisoPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"fips-us-iso-west-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - }, }, }, "rbin": service{ @@ -45756,37 +46019,10 @@ var awsisoPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-iso-east-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-iso-west-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "rds.us-iso-east-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, @@ -45795,16 +46031,7 @@ var awsisoPartition = partition{ endpointKey{ Region: "rds.us-iso-west-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, @@ -45817,12 +46044,12 @@ var awsisoPartition = partition{ Region: "us-iso-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", }, endpointKey{ Region: "us-iso-east-1-fips", }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, @@ -45835,12 +46062,12 @@ var awsisoPartition = partition{ Region: "us-iso-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", }, endpointKey{ Region: "us-iso-west-1-fips", }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, @@ -46799,24 +47026,9 @@ var awsisobPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - }, }, }, "rbin": service{ @@ -46843,28 +47055,10 @@ var awsisobPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-isob-east-1", - }: endpoint{ - Hostname: 
"rds-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "rds.us-isob-east-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, @@ -46877,12 +47071,12 @@ var awsisobPartition = partition{ Region: "us-isob-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", }, endpointKey{ Region: "us-isob-east-1-fips", }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, @@ -47237,7 +47431,11 @@ var awsisoePartition = partition{ SignatureVersions: []string{"v4"}, }, }, - Regions: regions{}, + Regions: regions{ + "eu-isoe-west-1": region{ + Description: "EU ISOE West", + }, + }, Services: services{}, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index f5dc6591dd..28238536bf 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.53.6" +const SDKVersion = "1.53.20" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 058334053c..2ca0b19db7 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -122,8 +122,8 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri } func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { + // If it's empty, and not ec2, generate an empty value + if !value.IsNil() && value.Len() == 0 && !q.isEC2 { v.Set(prefix, "") return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index 0d28ae695f..f0a849820a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -484,6 +484,10 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // // - The total request size exceeds 16 MB. // +// - Any individual items with keys exceeding the key length limits. For +// a partition key, the limit is 2048 bytes and for a sort key, the limit +// is 1024 bytes. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -781,9 +785,8 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req // relationship between two or more DynamoDB tables with the same table name // in the provided Regions. 
// -// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version), as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). To // determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). // To update existing global tables from version 2017.11.29 (Legacy) to version @@ -1552,8 +1555,8 @@ func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Req // If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. // If table is already in the DELETING state, no error is returned. // -// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). // // DynamoDB might continue to accept data read and write operations, such as // GetItem and PutItem, on a table in the DELETING state until the table deletion @@ -2268,9 +2271,8 @@ func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) ( // // Returns information about the specified global table. // -// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version), as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). To // determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). // To update existing global tables from version 2017.11.29 (Legacy) to version @@ -2383,9 +2385,8 @@ func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTable // // Describes Region-specific settings for a global table. // -// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version), as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). 
To // determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). // To update existing global tables from version 2017.11.29 (Legacy) to version @@ -2848,8 +2849,8 @@ func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request // table, when it was created, the primary key schema, and any indexes on the // table. // -// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). // // If you issue a DescribeTable request immediately after a CreateTable request, // DynamoDB might return a ResourceNotFoundException. This is because DescribeTable @@ -2940,8 +2941,8 @@ func (c *DynamoDB) DescribeTableReplicaAutoScalingRequest(input *DescribeTableRe // // Describes auto scaling settings across replicas of the global table at once. // -// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4711,9 +4712,8 @@ func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *r // // Lists all global tables that have a replica in the specified Region. // -// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version), as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). To // determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). // To update existing global tables from version 2017.11.29 (Legacy) to version @@ -7391,16 +7391,16 @@ func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req // schema, have DynamoDB Streams enabled, and have the same provisioned and // maximum write capacity units. // -// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version), as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). 
To // determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). // To update existing global tables from version 2017.11.29 (Legacy) to version // 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). // -// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. If you are using global tables Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). If you are using global tables Version 2019.11.21 +// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) // you can use UpdateTable (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html) // instead. // @@ -7537,9 +7537,8 @@ func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSett // // Updates settings for a global table. // -// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version), as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). To // determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). // To update existing global tables from version 2017.11.29 (Legacy) to version @@ -7961,8 +7960,8 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req // Modifies the provisioned throughput settings, global secondary indexes, or // DynamoDB Streams settings for a given table. // -// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). // // You can only perform one of the following operations at once: // @@ -8089,8 +8088,8 @@ func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplic // // Updates auto scaling settings on your global tables at once. // -// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10798,8 +10797,8 @@ func (s *ConditionalCheckFailedException) RequestID() string { // The capacity units consumed by an operation. 
The data returned includes the // total provisioned throughput consumed, along with statistics for the table // and any indexes involved in the operation. ConsumedCapacity is only returned -// if the request asked for it. For more information, see Provisioned Throughput -// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) +// if the request asked for it. For more information, see Provisioned capacity +// mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html) // in the Amazon DynamoDB Developer Guide. type ConsumedCapacity struct { _ struct{} `type:"structure"` @@ -11544,10 +11543,11 @@ type CreateTableInput struct { // capacity. This setting can be changed later. // // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + // PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html). // // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). + // workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity + // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html). BillingMode *string `type:"string" enum:"BillingMode"` // Indicates whether deletion protection is to be enabled (true) or disabled @@ -11668,6 +11668,9 @@ type CreateTableInput struct { // DynamoDB counts whitespaces when calculating the size of a policy against // this limit. For a full list of all considerations that apply for resource-based // policies, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html). + // + // You need to specify the CreateTable and PutResourcePolicy IAM actions for + // authorizing a user to create a table with a resource-based policy. ResourcePolicy *string `type:"string"` // Represents the settings used to enable server-side encryption. @@ -12484,7 +12487,7 @@ type DeleteItemOutput struct { // includes the total provisioned throughput consumed, along with statistics // for the table and any indexes involved in the operation. ConsumedCapacity // is only returned if the ReturnConsumedCapacity parameter was specified. For - // more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // more information, see Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -14546,8 +14549,8 @@ type ExecuteStatementOutput struct { // The capacity units consumed by an operation. The data returned includes the // total provisioned throughput consumed, along with statistics for the table // and any indexes involved in the operation. ConsumedCapacity is only returned - // if the request asked for it. 
For more information, see Provisioned Throughput
-	// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+	// if the request asked for it. For more information, see Provisioned capacity
+	// mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html)
 	// in the Amazon DynamoDB Developer Guide.
 	ConsumedCapacity *ConsumedCapacity `type:"structure"`
@@ -15891,7 +15894,7 @@ type GetItemOutput struct {
 	// the total provisioned throughput consumed, along with statistics for the
 	// table and any indexes involved in the operation. ConsumedCapacity is only
 	// returned if the ReturnConsumedCapacity parameter was specified. For more
-	// information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads)
+	// information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption)
 	// in the Amazon DynamoDB Developer Guide.
 	ConsumedCapacity *ConsumedCapacity `type:"structure"`
@@ -20658,7 +20661,7 @@ type PutItemOutput struct {
 	// the total provisioned throughput consumed, along with statistics for the
 	// table and any indexes involved in the operation. ConsumedCapacity is only
 	// returned if the ReturnConsumedCapacity parameter was specified. For more
-	// information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+	// information, see Capacity unit consumption for write operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption)
 	// in the Amazon DynamoDB Developer Guide.
 	ConsumedCapacity *ConsumedCapacity `type:"structure"`
@@ -21368,7 +21371,7 @@ type QueryOutput struct {
 	// the total provisioned throughput consumed, along with statistics for the
 	// table and any indexes involved in the operation. ConsumedCapacity is only
 	// returned if the ReturnConsumedCapacity parameter was specified. For more
-	// information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+	// information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption)
 	// in the Amazon DynamoDB Developer Guide.
 	ConsumedCapacity *ConsumedCapacity `type:"structure"`
@@ -23984,7 +23987,7 @@ type ScanOutput struct {
 	// the total provisioned throughput consumed, along with statistics for the
 	// table and any indexes involved in the operation. ConsumedCapacity is only
 	// returned if the ReturnConsumedCapacity parameter was specified. For more
-	// information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads)
+	// information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption)
 	// in the Amazon DynamoDB Developer Guide.
 	ConsumedCapacity *ConsumedCapacity `type:"structure"`
@@ -26863,10 +26866,11 @@ type UpdateGlobalTableSettingsInput struct {
 	// the global table defaults to PROVISIONED capacity billing mode.
 	//
 	//    * PROVISIONED - We recommend using PROVISIONED for predictable workloads.
-	//    PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual).
+	//    PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html).
 	//
 	//    * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable
-	//    workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand).
+	//    workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity
+	//    mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html).
 	GlobalTableBillingMode *string `type:"string" enum:"BillingMode"`
 
 	// Represents the settings of a global secondary index for a global table that
@@ -27391,7 +27395,7 @@ type UpdateItemOutput struct {
 	// includes the total provisioned throughput consumed, along with statistics
 	// for the table and any indexes involved in the operation. ConsumedCapacity
 	// is only returned if the ReturnConsumedCapacity parameter was specified. For
-	// more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads)
+	// more information, see Capacity unit consumption for write operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption)
 	// in the Amazon DynamoDB Developer Guide.
 	ConsumedCapacity *ConsumedCapacity `type:"structure"`
@@ -27749,10 +27753,11 @@ type UpdateTableInput struct {
 	// table and global secondary indexes over the past 30 minutes.
 	//
 	//    * PROVISIONED - We recommend using PROVISIONED for predictable workloads.
-	//    PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual).
+	//    PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html).
 	//
 	//    * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable
-	//    workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand).
+	//    workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity
+	//    mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html).
 	BillingMode *string `type:"string" enum:"BillingMode"`
 
 	// Indicates whether deletion protection is to be enabled (true) or disabled
@@ -27786,8 +27791,8 @@ type UpdateTableInput struct {
 	// A list of replica update actions (create, delete, or update) for the table.
 	//
-	// This property only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html)
-	// of global tables.
+	// For global tables, this property only applies to global tables using Version
+	// 2019.11.21 (Current version).
 	ReplicaUpdates []*ReplicationGroupUpdate `min:"1" type:"list"`
 
 	// The new server-side encryption settings for the specified table.
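The reworded DynamoDB doc comments above all hinge on the same behavior: `ConsumedCapacity` is populated only when the request opts in via `ReturnConsumedCapacity`. A minimal sketch of that opt-in against this SDK version (the table name, key attribute, and region below are hypothetical placeholders, not part of this diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := dynamodb.New(sess)

	// Without ReturnConsumedCapacity, out.ConsumedCapacity stays nil.
	out, err := svc.GetItem(&dynamodb.GetItemInput{
		TableName: aws.String("example-table"), // hypothetical table
		Key: map[string]*dynamodb.AttributeValue{
			"pk": {S: aws.String("item-1")}, // hypothetical key attribute
		},
		ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.ConsumedCapacity != nil {
		fmt.Printf("read consumed %.1f capacity units\n",
			aws.Float64Value(out.ConsumedCapacity.CapacityUnits))
	}
}
```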
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go index 36e14883e6..8db93d6e9c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go @@ -359,15 +359,15 @@ func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationI // - For ADM, PlatformPrincipal is client id and PlatformCredential is client // secret. // -// - For Baidu, PlatformPrincipal is API key and PlatformCredential is secret -// key. -// // - For APNS and APNS_SANDBOX using certificate credentials, PlatformPrincipal // is SSL certificate and PlatformCredential is private key. // // - For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal // is signing key ID and PlatformCredential is signing key. // +// - For Baidu, PlatformPrincipal is API key and PlatformCredential is secret +// key. +// // - For GCM (Firebase Cloud Messaging) using key credentials, there is no // PlatformPrincipal. The PlatformCredential is API key. // @@ -5418,7 +5418,7 @@ type CreateTopicInput struct { // A map of attributes with their corresponding values. // - // The following lists the names, descriptions, and values of the special request + // The following lists names, descriptions, and values of the special request // parameters that the CreateTopic action uses: // // * DeliveryPolicy – The policy that defines how Amazon SNS retries failed @@ -6412,15 +6412,14 @@ type GetSubscriptionAttributesOutput struct { // // * TopicArn – The topic ARN that the subscription is associated with. // - // The following attribute applies only to Amazon Kinesis Data Firehose delivery - // stream subscriptions: + // The following attribute applies only to Amazon Data Firehose delivery stream + // subscriptions: // // * SubscriptionRoleArn – The ARN of the IAM role that has the following: - // Permission to write to the Kinesis Data Firehose delivery stream Amazon - // SNS listed as a trusted entity Specifying a valid ARN for this attribute - // is required for Kinesis Data Firehose delivery stream subscriptions. For - // more information, see Fanout to Kinesis Data Firehose delivery streams - // (https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) + // Permission to write to the Firehose delivery stream Amazon SNS listed + // as a trusted entity Specifying a valid ARN for this attribute is required + // for Firehose delivery stream subscriptions. For more information, see + // Fanout to Firehose delivery streams (https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) // in the Amazon SNS Developer Guide. Attributes map[string]*string `type:"map"` } @@ -8099,9 +8098,8 @@ type PublishInput struct { // to email endpoints. This field will also be included, if present, in the // standard JSON messages delivered to other endpoints. // - // Constraints: Subjects must be ASCII text that begins with a letter, number, - // or punctuation mark; must not include line breaks or control characters; - // and must be less than 100 characters long. + // Constraints: Subjects must be UTF-8 text with no line breaks or control characters, + // and less than 100 characters long. Subject *string `type:"string"` // If you don't specify a value for the TargetArn parameter, you must specify @@ -8889,15 +8887,14 @@ type SetSubscriptionAttributesInput struct { // endpoint becomes unavailable) are held in the dead-letter queue for further // analysis or reprocessing. 
// - // The following attribute applies only to Amazon Kinesis Data Firehose delivery - // stream subscriptions: + // The following attribute applies only to Amazon Data Firehose delivery stream + // subscriptions: // // * SubscriptionRoleArn – The ARN of the IAM role that has the following: - // Permission to write to the Kinesis Data Firehose delivery stream Amazon - // SNS listed as a trusted entity Specifying a valid ARN for this attribute - // is required for Kinesis Data Firehose delivery stream subscriptions. For - // more information, see Fanout to Kinesis Data Firehose delivery streams - // (https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) + // Permission to write to the Firehose delivery stream Amazon SNS listed + // as a trusted entity Specifying a valid ARN for this attribute is required + // for Firehose delivery stream subscriptions. For more information, see + // Fanout to Firehose delivery streams (https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) // in the Amazon SNS Developer Guide. // // AttributeName is a required field @@ -9211,15 +9208,14 @@ type SubscribeInput struct { // endpoint becomes unavailable) are held in the dead-letter queue for further // analysis or reprocessing. // - // The following attribute applies only to Amazon Kinesis Data Firehose delivery - // stream subscriptions: + // The following attribute applies only to Amazon Data Firehose delivery stream + // subscriptions: // // * SubscriptionRoleArn – The ARN of the IAM role that has the following: - // Permission to write to the Kinesis Data Firehose delivery stream Amazon - // SNS listed as a trusted entity Specifying a valid ARN for this attribute - // is required for Kinesis Data Firehose delivery stream subscriptions. For - // more information, see Fanout to Kinesis Data Firehose delivery streams - // (https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) + // Permission to write to the Firehose delivery stream Amazon SNS listed + // as a trusted entity Specifying a valid ARN for this attribute is required + // for Firehose delivery stream subscriptions. For more information, see + // Fanout to Firehose delivery streams (https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) // in the Amazon SNS Developer Guide. // // The following attributes apply only to FIFO topics (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html): diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d973..0000000000 --- a/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md index 6062a4dacd..eab5dbf7ba 100644 --- a/vendor/github.com/google/btree/README.md +++ b/vendor/github.com/google/btree/README.md @@ -1,7 +1,5 @@ # BTree implementation for Go -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - This package provides an in-memory B-Tree implementation for Go, useful as an ordered, mutable data structure. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go index b83acdbc6d..969b910d70 100644 --- a/vendor/github.com/google/btree/btree.go +++ b/vendor/github.com/google/btree/btree.go @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !go1.18 +// +build !go1.18 + // Package btree implements in-memory B-Trees of arbitrary degree. // // btree implements an in-memory B-Tree for use as an ordered data structure. diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go new file mode 100644 index 0000000000..e44a0f4880 --- /dev/null +++ b/vendor/github.com/google/btree/btree_generic.go @@ -0,0 +1,1083 @@ +// Copyright 2014-2022 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific +// instantiation of that generic for the Item interface, with a backwards- +// compatible API. Before go1.18, generics are not supported, +// and BTree is just an implementation based around the Item interface. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +// +// There are two implementations; those suffixed with 'G' are generics, usable +// for any type, and require a passed-in "less" function to define their ordering. 
+// Those without this suffix are specific to the 'Item' interface, and use +// its 'Less' function for ordering. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +// FreeListG represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList, in particular when they're created with Clone. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeListG[T any] struct { + mu sync.Mutex + freelist []*node[T] +} + +// NewFreeListG creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeListG[T any](size int) *FreeListG[T] { + return &FreeListG[T]{freelist: make([]*node[T], 0, size)} +} + +func (f *FreeListG[T]) newNode() (n *node[T]) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node[T]) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIteratorG[T any] func(item T) bool + +// Ordered represents the set of types for which the '<' operator works. +type Ordered interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string +} + +// Less[T] returns a default LessFunc that uses the '<' operator for types that support it. +func Less[T Ordered]() LessFunc[T] { + return func(a, b T) bool { return a < b } +} + +// NewOrderedG creates a new B-Tree for ordered types. +func NewOrderedG[T Ordered](degree int) *BTreeG[T] { + return NewG[T](degree, Less[T]()) +} + +// NewG creates a new B-Tree with the given degree. +// +// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +// +// The passed-in LessFunc determines how objects of type T are ordered. +func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] { + return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize)) +} + +// NewWithFreeListG creates a new B-Tree that uses the given node free list. +func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] { + if degree <= 1 { + panic("bad degree") + } + return &BTreeG[T]{ + degree: degree, + cow: &copyOnWriteContext[T]{freelist: f, less: less}, + } +} + +// items stores items in a node. +type items[T any] []T + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items[T]) insertAt(index int, item T) { + var zero T + *s = append(*s, zero) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +}
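A minimal usage sketch of the generic constructors above, assuming the vendored `github.com/google/btree` import path; `ReplaceOrInsert` and `Get` are defined further down in this file:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	// NewOrderedG uses the built-in '<' via the Ordered constraint;
	// degree 2 yields a 2-3-4 tree, as described for NewG above.
	tr := btree.NewOrderedG[int](2)
	for _, v := range []int{3, 1, 2} {
		tr.ReplaceOrInsert(v)
	}
	if v, ok := tr.Get(2); ok {
		fmt.Println(v) // 2
	}

	// NewG takes an explicit LessFunc for types without a usable '<'.
	byLen := btree.NewG[string](8, func(a, b string) bool { return len(a) < len(b) })
	byLen.ReplaceOrInsert("go")
}
```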
+ +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items[T]) removeAt(index int) T { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + var zero T + (*s)[len(*s)-1] = zero + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items[T]) pop() (out T) { + index := len(*s) - 1 + out = (*s)[index] + var zero T + (*s)[index] = zero + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items[T]) truncate(index int) { + var toClear items[T] + *s, toClear = (*s)[:index], (*s)[index:] + var zero T + for i := 0; i < len(toClear); i++ { + toClear[i] = zero + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return less(item, s[i]) + }) + if i > 0 && !less(s[i-1], item) { + return i - 1, true + } + return i, false +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node[T any] struct { + items items[T] + children items[*node[T]] + cow *copyOnWriteContext[T] +} + +func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items[T], len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(items[*node[T]], len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node[T]) mutableChild(i int) *node[T] { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node[T]) split(i int) (T, *node[T]) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node[T]) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item +// be found/replaced by insert, it will be returned.
+func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) { + i, found := n.items.find(item, n.cow.less) + if found { + out := n.items[i] + n.items[i] = item + return out, true + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case n.cow.less(item, inTree): + // no change, we want first split node + case n.cow.less(inTree, item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out, true + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node[T]) get(key T) (_ T, _ bool) { + i, found := n.items.find(key, n.cow.less) + if found { + return n.items[i], true + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return +} + +// min returns the first item in the subtree. +func min[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return + } + return n.items[0], true +} + +// max returns the last item in the subtree. +func max[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return + } + return n.items[len(n.items)-1], true +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop(), true + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0), true + } + i = 0 + case removeItem: + i, found = n.items.find(item, n.cow.less) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i), true + } + return + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + var zero T + n.items[i], _ = child.remove(zero, minItems, removeMax) + return out, true + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. 
+// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +type optionalItem[T any] struct { + item T + valid bool +} + +func optional[T any](item T) optionalItem[T] { + return optionalItem[T]{item: item, valid: true} +} +func empty[T any]() optionalItem[T] { + return optionalItem[T]{} +} + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" query.
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start.valid { + index, _ = n.items.find(start.item, n.cow.less) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) { + hit = true + continue + } + hit = true + if stop.valid && !n.cow.less(n.items[i], stop.item) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start.valid { + index, found = n.items.find(start.item, n.cow.less) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start.valid && !n.cow.less(n.items[i], start.item) { + if !includeStart || hit || n.cow.less(start.item, n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop.valid && !n.cow.less(stop.item, n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// print is used for testing/debugging purposes. +func (n *node[T]) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTreeG is a generic implementation of a B-Tree. +// +// BTreeG stores items of type T in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTreeG[T any] struct { + degree int + length int + root *node[T] + cow *copyOnWriteContext[T] +} + +// LessFunc[T] determines how to order a type 'T'. It should implement a strict +// ordering, and should return true if within that ordering, 'a' < 'b'. +type LessFunc[T any] func(a, b T) bool + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext[T any] struct { + freelist *FreeListG[T] + less LessFunc[T] +} + +// Clone clones the btree, lazily. 
Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of t is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of t's original nodes would have been modified. Read operations +// should have no performance degradation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old t.cow) + // the new t.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTreeG[T]) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTreeG[T]) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext[T]) newNode() (n *node[T]) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). +func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned, +// and the second return value is true. Otherwise, it returns (zeroValue, false). +// +// nil cannot be added to the tree (will panic). +func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) { + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out, outb := t.root.insert(item, t.maxItems()) + if !outb { + t.length++ + } + return out, outb +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) Delete(item T) (T, bool) { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMin() (T, bool) { + var zero T + return t.deleteItem(zero, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMax() (T, bool) { + var zero T + return t.deleteItem(zero, removeMax) +} + +func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) { + if t.root == nil || len(t.root.items) == 0 { + return + } + t.root = t.root.mutableFor(t.cow) + out, outb := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if outb { + t.length-- + } + return out, outb +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns +// (zeroValue, false) if unable to find that item. 
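A short sketch of the range iteration defined above; note that `AscendRange` is half-open, `[greaterOrEqual, lessThan)`, and that returning `false` from the callback stops the walk early:

```go
tr := btree.NewOrderedG[string](4)
for _, s := range []string{"b", "c", "a"} {
	tr.ReplaceOrInsert(s)
}
tr.AscendRange("a", "c", func(s string) bool {
	fmt.Println(s) // prints "a", then "b"; "c" is excluded
	return true    // return false to stop the iteration early
})
```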
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) { + if t.root == nil { + return + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Min() (_ T, _ bool) { + return min(t.root) +} + +// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Max() (_ T, _ bool) { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTreeG[T]) Has(key T) bool { + _, ok := t.Get(key) + return ok +} + +// Len returns the number of items currently in the tree. +func (t *BTreeG[T]) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTreeG[T]) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node[T]) reset(c *copyOnWriteContext[T]) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree BTreeG[Item] + +var itemLess LessFunc[Item] = func(a, b Item) bool { + return a.Less(b) +} + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return (*BTree)(NewG[Item](degree, itemLess)) +} + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList FreeListG[Item] + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. 
+func NewFreeList(size int) *FreeList { + return (*FreeList)(NewFreeListG[Item](size)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f))) +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator ItemIteratorG[Item] + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of t is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of t's original nodes would have been modified. Read operations +// should have no performance degradation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + return (*BTree)((*BTreeG[Item])(t).Clone()) +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + i, _ := (*BTreeG[Item])(t).Delete(item) + return i +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + i, _ := (*BTreeG[Item])(t).DeleteMax() + return i +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + i, _ := (*BTreeG[Item])(t).DeleteMin() + return i +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + i, _ := (*BTreeG[Item])(t).Get(key) + return i +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + i, _ := (*BTreeG[Item])(t).Max() + return i +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + i, _ := (*BTreeG[Item])(t).Min() + return i +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return (*BTreeG[Item])(t).Has(key) +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item) + return i +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator)) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false.
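A sketch of the backwards-compatible `Item` API wrapped above, including the lazy `Clone` just documented; `btree.Int` (defined earlier in this file) supplies the required `Less` method:

```go
tr := btree.New(32)
tr.ReplaceOrInsert(btree.Int(1))

snap := tr.Clone()               // cheap copy-on-write snapshot
tr.ReplaceOrInsert(btree.Int(2)) // does not affect snap

fmt.Println(tr.Len(), snap.Len()) // 2 1
```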
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator)) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator)) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator)) +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return (*BTreeG[Item])(t).Len() +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. 
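To make the freelist interaction concrete, here is a small sketch of `Clear(true)` recycling nodes through a shared `FreeList` (the wrapper itself follows below):

```go
fl := btree.NewFreeList(btree.DefaultFreeListSize)
t1 := btree.NewWithFreeList(8, fl)
t1.ReplaceOrInsert(btree.Int(42))

t1.Clear(true) // t1's nodes go back to fl, ready for reuse...
t2 := btree.NewWithFreeList(8, fl)
t2.ReplaceOrInsert(btree.Int(7)) // ...by any tree sharing the freelist
```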
+func (t *BTree) Clear(addNodesToFreelist bool) { + (*BTreeG[Item])(t).Clear(addNodesToFreelist) +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 62df80a556..5551eb0bfa 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -847,7 +847,7 @@ func (p *Profile) HasFileLines() bool { // "[vdso]", [vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" } // Copy makes a fully independent copy of a profile. diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index feb372228b..d51736e7e3 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.12.3" + "v2": "2.12.4" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index 0d019d97fd..7e36eb48ff 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## [2.12.4](https://github.com/googleapis/gax-go/compare/v2.12.3...v2.12.4) (2024-05-03) + + +### Bug Fixes + +* provide unmarshal options for streams ([#343](https://github.com/googleapis/gax-go/issues/343)) ([ddf9a90](https://github.com/googleapis/gax-go/commit/ddf9a90bf180295d49875e15cb80b2136a49dbaf)) + ## [2.12.3](https://github.com/googleapis/gax-go/compare/v2.12.2...v2.12.3) (2024-03-14) diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 90348f303d..3006ad7bd9 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.12.3" +const Version = "2.12.4" diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go index cc4486eb9e..9b690d40c4 100644 --- a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go +++ b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go @@ -111,7 +111,8 @@ func (s *ProtoJSONStream) Recv() (proto.Message, error) { // Initialize a new instance of the protobuf message to unmarshal the // raw data into. 
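The gax-go change below swaps `protojson.Unmarshal` for an `UnmarshalOptions` value so that stream decoding tolerates unknown and missing fields. A standalone sketch of the difference, using `emptypb.Empty` purely as an illustrative message type:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	var m emptypb.Empty
	// Strict decoding rejects fields the message doesn't declare.
	err := protojson.Unmarshal([]byte(`{"x": 1}`), &m)
	fmt.Println(err != nil) // true: unknown field "x"

	// The options used in the change below discard unknown fields and
	// skip required-field checks instead of failing.
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	fmt.Println(unm.Unmarshal([]byte(`{"x": 1}`), &m)) // <nil>
}
```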
m := s.typ.New().Interface() - err := protojson.Unmarshal(raw, m) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + err := unm.Unmarshal(raw, m) return m, err } diff --git a/vendor/github.com/grafana/regexp/backtrack.go b/vendor/github.com/grafana/regexp/backtrack.go index 0739f5ff58..7c37c66a80 100644 --- a/vendor/github.com/grafana/regexp/backtrack.go +++ b/vendor/github.com/grafana/regexp/backtrack.go @@ -91,9 +91,7 @@ func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) { b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits) } else { b.visited = b.visited[:visitedSize] - for i := range b.visited { - b.visited[i] = 0 - } + clear(b.visited) // set to 0 } if cap(b.cap) < ncap { diff --git a/vendor/github.com/grafana/regexp/onepass.go b/vendor/github.com/grafana/regexp/onepass.go index bc47f4c4a8..53cbd95839 100644 --- a/vendor/github.com/grafana/regexp/onepass.go +++ b/vendor/github.com/grafana/regexp/onepass.go @@ -6,7 +6,7 @@ package regexp import ( "regexp/syntax" - "sort" + "slices" "strings" "unicode" "unicode/utf8" @@ -33,11 +33,11 @@ type onePassInst struct { Next []uint32 } -// OnePassPrefix returns a literal string that all matches for the +// onePassPrefix returns a literal string that all matches for the // regexp must start with. Complete is true if the prefix // is the entire match. Pc is the index of the last rune instruction -// in the string. The OnePassPrefix skips over the mandatory -// EmptyBeginText +// in the string. The onePassPrefix skips over the mandatory +// EmptyBeginText. func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { i := &p.Inst[p.Start] if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 { @@ -68,7 +68,7 @@ func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { return buf.String(), complete, pc } -// OnePassNext selects the next actionable state of the prog, based on the input character. +// onePassNext selects the next actionable state of the prog, based on the input character. // It should only be called when i.Op == InstAlt or InstAltMatch, and from the one-pass machine. // One of the alternates may ultimately lead without input to end of line. If the instruction // is InstAltMatch the path to the InstMatch is in i.Out, the normal node in i.Next. @@ -218,7 +218,7 @@ func cleanupOnePass(prog *onePassProg, original *syntax.Prog) { } } -// onePassCopy creates a copy of the original Prog, as we'll be modifying it +// onePassCopy creates a copy of the original Prog, as we'll be modifying it. func onePassCopy(prog *syntax.Prog) *onePassProg { p := &onePassProg{ Start: prog.Start, @@ -282,13 +282,6 @@ func onePassCopy(prog *syntax.Prog) *onePassProg { return p } -// runeSlice exists to permit sorting the case-folded rune sets. -type runeSlice []rune - -func (p runeSlice) Len() int { return len(p) } -func (p runeSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p runeSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune} var anyRune = []rune{0, unicode.MaxRune} @@ -383,7 +376,7 @@ func makeOnePass(p *onePassProg) *onePassProg { for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { runes = append(runes, r1, r1) } - sort.Sort(runeSlice(runes)) + slices.Sort(runes) } else { runes = append(runes, inst.Rune...) 
} @@ -407,7 +400,7 @@ func makeOnePass(p *onePassProg) *onePassProg { for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { runes = append(runes, r1, r1) } - sort.Sort(runeSlice(runes)) + slices.Sort(runes) } else { runes = append(runes, inst.Rune[0], inst.Rune[0]) } diff --git a/vendor/github.com/grafana/regexp/regexp.go b/vendor/github.com/grafana/regexp/regexp.go index 7958a39728..d1218ad0e8 100644 --- a/vendor/github.com/grafana/regexp/regexp.go +++ b/vendor/github.com/grafana/regexp/regexp.go @@ -8,9 +8,7 @@ // general syntax used by Perl, Python, and other languages. // More precisely, it is the syntax accepted by RE2 and described at // https://golang.org/s/re2syntax, except for \C. -// For an overview of the syntax, run -// -// go doc regexp/syntax +// For an overview of the syntax, see the [regexp/syntax] package. // // The regexp implementation provided by this package is // guaranteed to run in time linear in the size of the input. @@ -23,10 +21,10 @@ // or any book about automata theory. // // All characters are UTF-8-encoded code points. -// Following utf8.DecodeRune, each byte of an invalid UTF-8 sequence +// Following [utf8.DecodeRune], each byte of an invalid UTF-8 sequence // is treated as if it encoded utf8.RuneError (U+FFFD). // -// There are 16 methods of Regexp that match a regular expression and identify +// There are 16 methods of [Regexp] that match a regular expression and identify // the matched text. Their names are matched by this regular expression: // // Find(All)?(String)?(Submatch)?(Index)? @@ -82,7 +80,7 @@ import ( // Regexp is the representation of a compiled regular expression. // A Regexp is safe for concurrent use by multiple goroutines, -// except for configuration methods, such as Longest. +// except for configuration methods, such as [Regexp.Longest]. type Regexp struct { expr string // as passed to Compile prog *syntax.Prog // compiled program @@ -110,21 +108,21 @@ func (re *Regexp) String() string { return re.expr } -// Copy returns a new Regexp object copied from re. -// Calling Longest on one copy does not affect another. +// Copy returns a new [Regexp] object copied from re. +// Calling [Regexp.Longest] on one copy does not affect another. // -// Deprecated: In earlier releases, when using a Regexp in multiple goroutines, +// Deprecated: In earlier releases, when using a [Regexp] in multiple goroutines, // giving each goroutine its own copy helped to avoid lock contention. // As of Go 1.12, using Copy is no longer necessary to avoid lock contention. // Copy may still be appropriate if the reason for its use is to make -// two copies with different Longest settings. +// two copies with different [Regexp.Longest] settings. func (re *Regexp) Copy() *Regexp { re2 := *re return &re2 } // Compile parses a regular expression and returns, if successful, -// a Regexp object that can be used to match against text. +// a [Regexp] object that can be used to match against text. // // When matching against text, the regexp returns a match that // begins as early as possible in the input (leftmost), and among those @@ -132,12 +130,12 @@ func (re *Regexp) Copy() *Regexp { // This so-called leftmost-first matching is the same semantics // that Perl, Python, and other implementations use, although this // package implements it without the expense of backtracking. -// For POSIX leftmost-longest matching, see CompilePOSIX. +// For POSIX leftmost-longest matching, see [CompilePOSIX]. 
func Compile(expr string) (*Regexp, error) { return compile(expr, syntax.Perl, false) } -// CompilePOSIX is like Compile but restricts the regular expression +// CompilePOSIX is like [Compile] but restricts the regular expression // to POSIX ERE (egrep) syntax and changes the match semantics to // leftmost-longest. // @@ -164,7 +162,7 @@ func CompilePOSIX(expr string) (*Regexp, error) { // That is, when matching against text, the regexp returns a match that // begins as early as possible in the input (leftmost), and among those // it chooses a match that is as long as possible. -// This method modifies the Regexp and may not be called concurrently +// This method modifies the [Regexp] and may not be called concurrently // with any other methods. func (re *Regexp) Longest() { re.longest = true @@ -270,7 +268,7 @@ func (re *Regexp) put(m *machine) { matchPool[re.mpool].Put(m) } -// minInputLen walks the regexp to find the minimum length of any matchable input +// minInputLen walks the regexp to find the minimum length of any matchable input. func minInputLen(re *syntax.Regexp) int { switch re.Op { default: @@ -310,7 +308,7 @@ func minInputLen(re *syntax.Regexp) int { } } -// MustCompile is like Compile but panics if the expression cannot be parsed. +// MustCompile is like [Compile] but panics if the expression cannot be parsed. // It simplifies safe initialization of global variables holding compiled regular // expressions. func MustCompile(str string) *Regexp { @@ -321,7 +319,7 @@ func MustCompile(str string) *Regexp { return regexp } -// MustCompilePOSIX is like CompilePOSIX but panics if the expression cannot be parsed. +// MustCompilePOSIX is like [CompilePOSIX] but panics if the expression cannot be parsed. // It simplifies safe initialization of global variables holding compiled regular // expressions. func MustCompilePOSIX(str string) *Regexp { @@ -339,13 +337,13 @@ func quote(s string) string { return strconv.Quote(s) } -// NumSubexp returns the number of parenthesized subexpressions in this Regexp. +// NumSubexp returns the number of parenthesized subexpressions in this [Regexp]. func (re *Regexp) NumSubexp() int { return re.numSubexp } // SubexpNames returns the names of the parenthesized subexpressions -// in this Regexp. The name for the first sub-expression is names[1], +// in this [Regexp]. The name for the first sub-expression is names[1], // so that if m is a match slice, the name for m[i] is SubexpNames()[i]. // Since the Regexp as a whole cannot be named, names[0] is always // the empty string. The slice should not be modified. @@ -521,7 +519,7 @@ func (re *Regexp) LiteralPrefix() (prefix string, complete bool) { return re.prefix, re.prefixComplete } -// MatchReader reports whether the text returned by the RuneReader +// MatchReader reports whether the text returned by the [io.RuneReader] // contains any match of the regular expression re. func (re *Regexp) MatchReader(r io.RuneReader) bool { return re.doMatch(r, nil, "") @@ -541,7 +539,7 @@ func (re *Regexp) Match(b []byte) bool { // MatchReader reports whether the text returned by the RuneReader // contains any match of the regular expression pattern. -// More complicated queries need to use Compile and the full Regexp interface. +// More complicated queries need to use [Compile] and the full [Regexp] interface. 
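For reference, the convenience functions touched above behave as in the standard library's regexp package, which this vendored fork mirrors. A quick sketch:

```go
matched, err := regexp.MatchString(`p([a-z]+)ch`, "peach")
fmt.Println(matched, err) // true <nil>

// MustCompile panics on a bad pattern, which suits package-level vars.
var pat = regexp.MustCompile(`p([a-z]+)ch`)
fmt.Println(pat.MatchString("peach")) // true
```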
func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) { re, err := Compile(pattern) if err != nil { @@ -552,7 +550,7 @@ func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) { // MatchString reports whether the string s // contains any match of the regular expression pattern. -// More complicated queries need to use Compile and the full Regexp interface. +// More complicated queries need to use [Compile] and the full [Regexp] interface. func MatchString(pattern string, s string) (matched bool, err error) { re, err := Compile(pattern) if err != nil { @@ -563,7 +561,7 @@ func MatchString(pattern string, s string) (matched bool, err error) { // Match reports whether the byte slice b // contains any match of the regular expression pattern. -// More complicated queries need to use Compile and the full Regexp interface. +// More complicated queries need to use [Compile] and the full [Regexp] interface. func Match(pattern string, b []byte) (matched bool, err error) { re, err := Compile(pattern) if err != nil { @@ -572,9 +570,9 @@ func Match(pattern string, b []byte) (matched bool, err error) { return re.Match(b), nil } -// ReplaceAllString returns a copy of src, replacing matches of the Regexp -// with the replacement string repl. Inside repl, $ signs are interpreted as -// in Expand, so for instance $1 represents the text of the first submatch. +// ReplaceAllString returns a copy of src, replacing matches of the [Regexp] +// with the replacement string repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. func (re *Regexp) ReplaceAllString(src, repl string) string { n := 2 if strings.Contains(repl, "$") { @@ -586,9 +584,9 @@ func (re *Regexp) ReplaceAllString(src, repl string) string { return string(b) } -// ReplaceAllLiteralString returns a copy of src, replacing matches of the Regexp +// ReplaceAllLiteralString returns a copy of src, replacing matches of the [Regexp] // with the replacement string repl. The replacement repl is substituted directly, -// without using Expand. +// without using [Regexp.Expand]. func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { return append(dst, repl...) @@ -596,9 +594,9 @@ func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { } // ReplaceAllStringFunc returns a copy of src in which all matches of the -// Regexp have been replaced by the return value of function repl applied +// [Regexp] have been replaced by the return value of function repl applied // to the matched substring. The replacement returned by repl is substituted -// directly, without using Expand. +// directly, without using [Regexp.Expand]. func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { return append(dst, repl(src[match[0]:match[1]])...) @@ -671,9 +669,9 @@ func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst return buf } -// ReplaceAll returns a copy of src, replacing matches of the Regexp -// with the replacement text repl. Inside repl, $ signs are interpreted as -// in Expand, so for instance $1 represents the text of the first submatch. +// ReplaceAll returns a copy of src, replacing matches of the [Regexp] +// with the replacement text repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. 
func (re *Regexp) ReplaceAll(src, repl []byte) []byte { n := 2 if bytes.IndexByte(repl, '$') >= 0 { @@ -689,9 +687,9 @@ func (re *Regexp) ReplaceAll(src, repl []byte) []byte { return b } -// ReplaceAllLiteral returns a copy of src, replacing matches of the Regexp +// ReplaceAllLiteral returns a copy of src, replacing matches of the [Regexp] // with the replacement bytes repl. The replacement repl is substituted directly, -// without using Expand. +// without using [Regexp.Expand]. func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { return append(dst, repl...) @@ -699,9 +697,9 @@ func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { } // ReplaceAllFunc returns a copy of src in which all matches of the -// Regexp have been replaced by the return value of function repl applied +// [Regexp] have been replaced by the return value of function repl applied // to the matched byte slice. The replacement returned by repl is substituted -// directly, without using Expand. +// directly, without using [Regexp.Expand]. func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { return append(dst, repl(src[match[0]:match[1]])...) @@ -845,7 +843,7 @@ func (re *Regexp) FindIndex(b []byte) (loc []int) { // FindString returns a string holding the text of the leftmost match in s of the regular // expression. If there is no match, the return value is an empty string, // but it will also be empty if the regular expression successfully matches -// an empty string. Use FindStringIndex or FindStringSubmatch if it is +// an empty string. Use [Regexp.FindStringIndex] or [Regexp.FindStringSubmatch] if it is // necessary to distinguish these cases. func (re *Regexp) FindString(s string) string { var dstCap [2]int @@ -870,7 +868,7 @@ func (re *Regexp) FindStringIndex(s string) (loc []int) { // FindReaderIndex returns a two-element slice of integers defining the // location of the leftmost match of the regular expression in text read from -// the RuneReader. The match text was found in the input stream at +// the [io.RuneReader]. The match text was found in the input stream at // byte offset loc[0] through loc[1]-1. // A return value of nil indicates no match. func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) { @@ -904,7 +902,7 @@ func (re *Regexp) FindSubmatch(b []byte) [][]byte { // Expand appends template to dst and returns the result; during the // append, Expand replaces variables in the template with corresponding // matches drawn from src. The match slice should have been returned by -// FindSubmatchIndex. +// [Regexp.FindSubmatchIndex]. // // In the template, a variable is denoted by a substring of the form // $name or ${name}, where name is a non-empty sequence of letters, @@ -922,7 +920,7 @@ func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) [ return re.expand(dst, string(template), src, "", match) } -// ExpandString is like Expand but the template and source are strings. +// ExpandString is like [Regexp.Expand] but the template and source are strings. // It appends to and returns a byte slice in order to give the calling // code control over allocation. 
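A short sketch contrasting the `Expand`-style `$` interpretation with the literal variants documented above:

```go
re := regexp.MustCompile(`(\w+):(\w+)`)
fmt.Println(re.ReplaceAllString("key:value", "$2=$1"))        // value=key
fmt.Println(re.ReplaceAllLiteralString("key:value", "$2=$1")) // $2=$1 (no expansion)
```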
func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte { @@ -1067,7 +1065,7 @@ func (re *Regexp) FindStringSubmatchIndex(s string) []int { // FindReaderSubmatchIndex returns a slice holding the index pairs // identifying the leftmost match of the regular expression of text read by -// the RuneReader, and the matches, if any, of its subexpressions, as defined +// the [io.RuneReader], and the matches, if any, of its subexpressions, as defined // by the 'Submatch' and 'Index' descriptions in the package comment. A // return value of nil indicates no match. func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { @@ -1076,7 +1074,7 @@ func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { const startSize = 10 // The size at which to start a slice in the 'All' routines. -// FindAll is the 'All' version of Find; it returns a slice of all successive +// FindAll is the 'All' version of [Regexp.Find]; it returns a slice of all successive // matches of the expression, as defined by the 'All' description in the // package comment. // A return value of nil indicates no match. @@ -1094,7 +1092,7 @@ func (re *Regexp) FindAll(b []byte, n int) [][]byte { return result } -// FindAllIndex is the 'All' version of FindIndex; it returns a slice of all +// FindAllIndex is the 'All' version of [Regexp.FindIndex]; it returns a slice of all // successive matches of the expression, as defined by the 'All' description // in the package comment. // A return value of nil indicates no match. @@ -1112,7 +1110,7 @@ func (re *Regexp) FindAllIndex(b []byte, n int) [][]int { return result } -// FindAllString is the 'All' version of FindString; it returns a slice of all +// FindAllString is the 'All' version of [Regexp.FindString]; it returns a slice of all // successive matches of the expression, as defined by the 'All' description // in the package comment. // A return value of nil indicates no match. @@ -1130,7 +1128,7 @@ func (re *Regexp) FindAllString(s string, n int) []string { return result } -// FindAllStringIndex is the 'All' version of FindStringIndex; it returns a +// FindAllStringIndex is the 'All' version of [Regexp.FindStringIndex]; it returns a // slice of all successive matches of the expression, as defined by the 'All' // description in the package comment. // A return value of nil indicates no match. @@ -1148,7 +1146,7 @@ func (re *Regexp) FindAllStringIndex(s string, n int) [][]int { return result } -// FindAllSubmatch is the 'All' version of FindSubmatch; it returns a slice +// FindAllSubmatch is the 'All' version of [Regexp.FindSubmatch]; it returns a slice // of all successive matches of the expression, as defined by the 'All' // description in the package comment. // A return value of nil indicates no match. @@ -1172,7 +1170,7 @@ func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte { return result } -// FindAllSubmatchIndex is the 'All' version of FindSubmatchIndex; it returns +// FindAllSubmatchIndex is the 'All' version of [Regexp.FindSubmatchIndex]; it returns // a slice of all successive matches of the expression, as defined by the // 'All' description in the package comment. // A return value of nil indicates no match. 
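As in the standard library, the `n` parameter shared by the 'All' routines caps the number of matches, with a negative value meaning all matches; a sketch:

```go
re := regexp.MustCompile(`a.`)
fmt.Println(re.FindAllString("paranormal", -1)) // [ar an al]
fmt.Println(re.FindAllString("paranormal", 2))  // [ar an]
fmt.Println(re.FindAllString("none", -1))       // [] (nil: no match)
```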
@@ -1190,7 +1188,7 @@ func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int { return result } -// FindAllStringSubmatch is the 'All' version of FindStringSubmatch; it +// FindAllStringSubmatch is the 'All' version of [Regexp.FindStringSubmatch]; it // returns a slice of all successive matches of the expression, as defined by // the 'All' description in the package comment. // A return value of nil indicates no match. @@ -1215,7 +1213,7 @@ func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string { } // FindAllStringSubmatchIndex is the 'All' version of -// FindStringSubmatchIndex; it returns a slice of all successive matches of +// [Regexp.FindStringSubmatchIndex]; it returns a slice of all successive matches of // the expression, as defined by the 'All' description in the package // comment. // A return value of nil indicates no match. @@ -1237,8 +1235,8 @@ func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { // the substrings between those expression matches. // // The slice returned by this method consists of all the substrings of s -// not contained in the slice returned by FindAllString. When called on an expression -// that contains no metacharacters, it is equivalent to strings.SplitN. +// not contained in the slice returned by [Regexp.FindAllString]. When called on an expression +// that contains no metacharacters, it is equivalent to [strings.SplitN]. // // Example: // @@ -1283,3 +1281,24 @@ func (re *Regexp) Split(s string, n int) []string { return strings } + +// MarshalText implements [encoding.TextMarshaler]. The output +// matches that of calling the [Regexp.String] method. +// +// Note that the output is lossy in some cases: This method does not indicate +// POSIX regular expressions (i.e. those compiled by calling [CompilePOSIX]), or +// those for which the [Regexp.Longest] method has been called. +func (re *Regexp) MarshalText() ([]byte, error) { + return []byte(re.String()), nil +} + +// UnmarshalText implements [encoding.TextUnmarshaler] by calling +// [Compile] on the encoded value. +func (re *Regexp) UnmarshalText(text []byte) error { + newRE, err := Compile(string(text)) + if err != nil { + return err + } + *re = *newRE + return nil +} diff --git a/vendor/github.com/grafana/regexp/syntax/doc.go b/vendor/github.com/grafana/regexp/syntax/doc.go index f6a4b43f7a..877f1043dd 100644 --- a/vendor/github.com/grafana/regexp/syntax/doc.go +++ b/vendor/github.com/grafana/regexp/syntax/doc.go @@ -2,17 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// DO NOT EDIT. This file is generated by mksyntaxgo from the RE2 distribution. +// Code generated by mksyntaxgo from the RE2 distribution. DO NOT EDIT. /* Package syntax parses regular expressions into parse trees and compiles parse trees into programs. Most clients of regular expressions will use the -facilities of package regexp (such as Compile and Match) instead of this package. +facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package. # Syntax -The regular expression syntax understood by this package when parsing with the Perl flag is as follows. -Parts of the syntax can be disabled by passing alternate flags to Parse. +The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows. +Parts of the syntax can be disabled by passing alternate flags to [Parse]. 
Single characters: @@ -56,6 +56,7 @@ Grouping: (re) numbered capturing group (submatch) (?P<name>re) named & numbered capturing group (submatch) + (?<name>re) named & numbered capturing group (submatch) (?:re) non-capturing group (?flags) set flags within current group; non-capturing (?flags:re) set flags during re; non-capturing @@ -136,6 +137,6 @@ ASCII character classes: [[:word:]] word characters (== [0-9A-Za-z_]) [[:xdigit:]] hex digit (== [0-9A-Fa-f]) -Unicode character classes are those in unicode.Categories and unicode.Scripts. +Unicode character classes are those in [unicode.Categories] and [unicode.Scripts]. */ package syntax diff --git a/vendor/github.com/grafana/regexp/syntax/op_string.go b/vendor/github.com/grafana/regexp/syntax/op_string.go index 3952b2bdd5..1368f5b7ea 100644 --- a/vendor/github.com/grafana/regexp/syntax/op_string.go +++ b/vendor/github.com/grafana/regexp/syntax/op_string.go @@ -4,6 +4,32 @@ package syntax import "strconv" +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[OpNoMatch-1] + _ = x[OpEmptyMatch-2] + _ = x[OpLiteral-3] + _ = x[OpCharClass-4] + _ = x[OpAnyCharNotNL-5] + _ = x[OpAnyChar-6] + _ = x[OpBeginLine-7] + _ = x[OpEndLine-8] + _ = x[OpBeginText-9] + _ = x[OpEndText-10] + _ = x[OpWordBoundary-11] + _ = x[OpNoWordBoundary-12] + _ = x[OpCapture-13] + _ = x[OpStar-14] + _ = x[OpPlus-15] + _ = x[OpQuest-16] + _ = x[OpRepeat-17] + _ = x[OpConcat-18] + _ = x[OpAlternate-19] + _ = x[opPseudo-128] +} + const ( _Op_name_0 = "NoMatchEmptyMatchLiteralCharClassAnyCharNotNLAnyCharBeginLineEndLineBeginTextEndTextWordBoundaryNoWordBoundaryCaptureStarPlusQuestRepeatConcatAlternate" _Op_name_1 = "opPseudo" diff --git a/vendor/github.com/grafana/regexp/syntax/parse.go b/vendor/github.com/grafana/regexp/syntax/parse.go index b6d348d00c..6ed6491c80 100644 --- a/vendor/github.com/grafana/regexp/syntax/parse.go +++ b/vendor/github.com/grafana/regexp/syntax/parse.go @@ -44,6 +44,7 @@ const ( ErrTrailingBackslash ErrorCode = "trailing backslash at end of expression" ErrUnexpectedParen ErrorCode = "unexpected )" ErrNestingDepth ErrorCode = "expression nests too deeply" + ErrLarge ErrorCode = "expression too large" ) func (e ErrorCode) String() string { @@ -159,7 +160,7 @@ func (p *parser) reuse(re *Regexp) { func (p *parser) checkLimits(re *Regexp) { if p.numRunes > maxRunes { - panic(ErrInternalError) + panic(ErrLarge) } p.checkSize(re) p.checkHeight(re) @@ -203,7 +204,7 @@ func (p *parser) checkSize(re *Regexp) { } if p.calcSize(re, true) > maxSize { - panic(ErrInternalError) + panic(ErrLarge) } } @@ -248,9 +249,7 @@ func (p *parser) calcSize(re *Regexp, force bool) int64 { size = int64(re.Max)*sub + int64(re.Max-re.Min) } - if size < 1 { - size = 1 - } + size = max(1, size) p.size[re] = size return size } @@ -381,14 +380,12 @@ func minFoldRune(r rune) rune { if r < minFold || r > maxFold { return r } - min := r + m := r r0 := r for r = unicode.SimpleFold(r); r != r0; r = unicode.SimpleFold(r) { - if min > r { - min = r - } + m = min(m, r) } - return min + return m } // op pushes a regexp with the given op onto the stack @@ -897,8 +894,8 @@ func parse(s string, flags Flags) (_ *Regexp, err error) { panic(r) case nil: // ok - case ErrInternalError: // too big - err = &Error{Code: ErrInternalError, Expr: s} + case ErrLarge: // too big + err = &Error{Code: ErrLarge, Expr: s} case ErrNestingDepth: err = &Error{Code: ErrNestingDepth, Expr: s} }
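The new `(?<name>re)` row in the syntax table above, wired up in the `parsePerlFlags` hunk that follows, makes the two named-capture spellings equivalent. A minimal sketch, assuming a regexp package new enough to carry this change (this vendored grafana/regexp, or a correspondingly recent standard library):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The long-standing Python/Go spelling and the newer .NET/Perl spelling
	// now parse to the same named capture groups.
	a := regexp.MustCompile(`(?P<year>\d{4})-(?P<month>\d{2})`)
	b := regexp.MustCompile(`(?<year>\d{4})-(?<month>\d{2})`)

	fmt.Println(a.SubexpNames()) // [ year month]
	fmt.Println(b.SubexpNames()) // [ year month]

	m := b.FindStringSubmatch("2024-05")
	fmt.Println(m[b.SubexpIndex("year")], m[b.SubexpIndex("month")]) // 2024 05
}
```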
@@ -1158,9 +1155,18 @@ func (p *parser) parsePerlFlags(s string) (rest string, err error) { // support all three as well. EcmaScript 4 uses only the Python form. // // In both the open source world (via Code Search) and the - // Google source tree, (?P<expr>name) is the dominant form, - // so that's the one we implement. One is enough. - if len(t) > 4 && t[2] == 'P' && t[3] == '<' { + // Google source tree, (?P<expr>name) and (?<expr>name) are the + // dominant forms of named captures and both are supported. + startsWithP := len(t) > 4 && t[2] == 'P' && t[3] == '<' + startsWithName := len(t) > 3 && t[2] == '<' + + if startsWithP || startsWithName { + // position of expr start + exprStartPos := 4 + if startsWithName { + exprStartPos = 3 + } + // Pull out name. end := strings.IndexRune(t, '>') if end < 0 { @@ -1170,8 +1176,8 @@ func (p *parser) parsePerlFlags(s string) (rest string, err error) { return "", &Error{ErrInvalidNamedCapture, s} } - capture := t[:end+1] // "(?P<name>" - name := t[4:end] // "name" + capture := t[:end+1] // "(?P<name>" or "(?<name>" + name := t[exprStartPos:end] // "name" if err = checkUTF8(name); err != nil { return "", err } @@ -1853,6 +1859,22 @@ func cleanClass(rp *[]rune) []rune { return r[:w] } +// inCharClass reports whether r is in the class. +// It assumes the class has been cleaned by cleanClass. +func inCharClass(r rune, class []rune) bool { + _, ok := sort.Find(len(class)/2, func(i int) int { + lo, hi := class[2*i], class[2*i+1] + if r > hi { + return +1 + } + if r < lo { + return -1 + } + return 0 + }) + return ok +} + // appendLiteral returns the result of appending the literal x to the class r. func appendLiteral(r []rune, x rune, flags Flags) []rune { if flags&FoldCase != 0 { @@ -1937,7 +1959,7 @@ func appendClass(r []rune, x []rune) []rune { return r } -// appendFolded returns the result of appending the case folding of the class x to the class r. +// appendFoldedClass returns the result of appending the case folding of the class x to the class r. func appendFoldedClass(r []rune, x []rune) []rune { for i := 0; i < len(x); i += 2 { r = appendFoldedRange(r, x[i], x[i+1]) diff --git a/vendor/github.com/grafana/regexp/syntax/prog.go b/vendor/github.com/grafana/regexp/syntax/prog.go index 896cdc42c2..6a3705ec8f 100644 --- a/vendor/github.com/grafana/regexp/syntax/prog.go +++ b/vendor/github.com/grafana/regexp/syntax/prog.go @@ -106,7 +106,9 @@ func EmptyOpContext(r1, r2 rune) EmptyOp { // during the evaluation of the \b and \B zero-width assertions. // These assertions are ASCII-only: the word characters are [A-Za-z0-9_]. func IsWordChar(r rune) bool { - return 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' || '0' <= r && r <= '9' || r == '_' + // Test for lowercase letters first, as these occur more + // frequently than uppercase letters in common cases. + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || r == '_' } // An Inst is a single instruction in a regular expression program. @@ -189,7 +191,7 @@ Loop: const noMatch = -1 // MatchRune reports whether the instruction matches (and consumes) r. -// It should only be called when i.Op == InstRune. +// It should only be called when i.Op == [InstRune]. func (i *Inst) MatchRune(r rune) bool { return i.MatchRunePos(r) != noMatch } @@ -198,7 +200,7 @@ func (i *Inst) MatchRune(r rune) bool { // If so, MatchRunePos returns the index of the matching rune pair // (or, when len(i.Rune) == 1, rune singleton). // If not, MatchRunePos returns -1. -// MatchRunePos should only be called when i.Op == InstRune.
+// MatchRunePos should only be called when i.Op == [InstRune]. func (i *Inst) MatchRunePos(r rune) int { rune := i.Rune @@ -245,7 +247,7 @@ func (i *Inst) MatchRunePos(r rune) int { lo := 0 hi := len(rune) / 2 for lo < hi { - m := lo + (hi-lo)/2 + m := int(uint(lo+hi) >> 1) if c := rune[2*m]; c <= r { if r <= rune[2*m+1] { return m @@ -260,7 +262,7 @@ func (i *Inst) MatchRunePos(r rune) int { // MatchEmptyWidth reports whether the instruction matches // an empty string between the runes before and after. -// It should only be called when i.Op == InstEmptyWidth. +// It should only be called when i.Op == [InstEmptyWidth]. func (i *Inst) MatchEmptyWidth(before rune, after rune) bool { switch EmptyOp(i.Arg) { case EmptyBeginLine: diff --git a/vendor/github.com/grafana/regexp/syntax/regexp.go b/vendor/github.com/grafana/regexp/syntax/regexp.go index 3a4d2d201c..8ad3653abb 100644 --- a/vendor/github.com/grafana/regexp/syntax/regexp.go +++ b/vendor/github.com/grafana/regexp/syntax/regexp.go @@ -8,6 +8,7 @@ package syntax // In this package, re is always a *Regexp and r is always a rune. import ( + "slices" "strconv" "strings" "unicode" @@ -75,24 +76,10 @@ func (x *Regexp) Equal(y *Regexp) bool { } case OpLiteral, OpCharClass: - if len(x.Rune) != len(y.Rune) { - return false - } - for i, r := range x.Rune { - if r != y.Rune[i] { - return false - } - } + return slices.Equal(x.Rune, y.Rune) case OpAlternate, OpConcat: - if len(x.Sub) != len(y.Sub) { - return false - } - for i, sub := range x.Sub { - if !sub.Equal(y.Sub[i]) { - return false - } - } + return slices.EqualFunc(x.Sub, y.Sub, func(a, b *Regexp) bool { return a.Equal(b) }) case OpStar, OpPlus, OpQuest: if x.Flags&NonGreedy != y.Flags&NonGreedy || !x.Sub[0].Equal(y.Sub[0]) { @@ -112,8 +99,165 @@ func (x *Regexp) Equal(y *Regexp) bool { return true } +// printFlags is a bit set indicating which flags (including non-capturing parens) to print around a regexp. 
+type printFlags uint8 + +const ( + flagI printFlags = 1 << iota // (?i: + flagM // (?m: + flagS // (?s: + flagOff // ) + flagPrec // (?: ) + negShift = 5 // flagI<<negShift is (?-i: +) @@ -122,15 +266,9 @@ func writeRegexp(b *strings.Builder, re *Regexp) { case OpEmptyMatch: b.WriteString(`(?:)`) case OpLiteral: - if re.Flags&FoldCase != 0 { - b.WriteString(`(?i:`) - } for _, r := range re.Rune { escape(b, r, false) } - if re.Flags&FoldCase != 0 { - b.WriteString(`)`) - } case OpCharClass: if len(re.Rune)%2 != 0 { b.WriteString(`[invalid char class]`) @@ -147,7 +285,9 @@ func writeRegexp(b *strings.Builder, re *Regexp) { lo, hi := re.Rune[i]+1, re.Rune[i+1]-1 escape(b, lo, lo == '-') if lo != hi { - b.WriteRune('-') + if hi != lo+1 { + b.WriteRune('-') + } escape(b, hi, hi == '-') } } @@ -156,25 +296,25 @@ func writeRegexp(b *strings.Builder, re *Regexp) { lo, hi := re.Rune[i], re.Rune[i+1] escape(b, lo, lo == '-') if lo != hi { - b.WriteRune('-') + if hi != lo+1 { + b.WriteRune('-') + } escape(b, hi, hi == '-') } } } b.WriteRune(']') - case OpAnyCharNotNL: - b.WriteString(`(?-s:.)`) - case OpAnyChar: - b.WriteString(`(?s:.)`) + case OpAnyCharNotNL, OpAnyChar: + b.WriteString(`.`) case OpBeginLine: - b.WriteString(`(?m:^)`) + b.WriteString(`^`) case OpEndLine: - b.WriteString(`(?m:$)`) + b.WriteString(`$`) case OpBeginText: b.WriteString(`\A`) case OpEndText: if re.Flags&WasDollar != 0 { - b.WriteString(`(?-m:$)`) + b.WriteString(`$`) } else { b.WriteString(`\z`) } @@ -191,17 +331,17 @@ func writeRegexp(b *strings.Builder, re *Regexp) { b.WriteRune('(') } if re.Sub[0].Op != OpEmptyMatch { - writeRegexp(b, re.Sub[0]) + writeRegexp(b, re.Sub[0], flags[re.Sub[0]], flags) } b.WriteRune(')') case OpStar, OpPlus, OpQuest, OpRepeat: - if sub := re.Sub[0]; sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 { - b.WriteString(`(?:`) - writeRegexp(b, sub) - b.WriteString(`)`) - } else { - writeRegexp(b, sub) + p := printFlags(0) + sub := re.Sub[0] + if sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 { + p = flagPrec } + writeRegexp(b, sub, p, flags) + switch re.Op { case OpStar: b.WriteRune('*') @@ -225,27 +365,31 @@ func writeRegexp(b *strings.Builder, re *Regexp) { } case OpConcat: for _, sub := range re.Sub { + p := printFlags(0) if sub.Op == OpAlternate { - b.WriteString(`(?:`) - writeRegexp(b, sub) - b.WriteString(`)`) - } else { - writeRegexp(b, sub) + p = flagPrec } + writeRegexp(b, sub, p, flags) } case OpAlternate: for i, sub := range re.Sub { if i > 0 { b.WriteRune('|') } - writeRegexp(b, sub) + writeRegexp(b, sub, 0, flags) } } } func (re *Regexp) String() string { var b strings.Builder - writeRegexp(&b, re) + var flags map[*Regexp]printFlags + must, cant := calcFlags(re, &flags) + must |= (cant &^ flagI) << negShift + if must != 0 { + must |= flagOff + } + writeRegexp(&b, re, must, flags) return b.String() } diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/.gitignore index 826caa3902..159a5219b8 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/.gitignore +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/.gitignore @@ -201,4 +201,5 @@ coverage.txt #vendor vendor/ -.envrc \ No newline at end of file +.envrc +.bin \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/.golangci.yml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/.golangci.yml new file mode 100644 index 0000000000..f5298b0076 --- /dev/null +++
b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/.golangci.yml @@ -0,0 +1,13 @@ +--- + +run: + deadline: 5m + +output: + sort-results: true + +linters-settings: + errcheck: + exclude: errcheck_excludes.txt + gofumpt: + extra-rules: true \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/CHANGELOG.md deleted file mode 100644 index 4d7840480f..0000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/CHANGELOG.md +++ /dev/null @@ -1,51 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) -and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). - -Types of changes: -- `Added` for new features. -- `Changed` for changes in existing functionality. -- `Deprecated` for soon-to-be removed features. -- `Removed` for now removed features. -- `Fixed` for any bug fixes. -- `Security` in case of vulnerabilities. - -## [Unreleased] - -### Added - -- [#223](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/223) Add go-kit logging middleware - [adrien-f](https://github.com/adrien-f) - -## [v1.1.0] - 2019-09-12 -### Added -- [#226](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/226) Support for go modules. -- [#221](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/221) logging/zap add support for gRPC LoggerV2 - [kush-patel-hs](https://github.com/kush-patel-hs) -- [#181](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/181) Rate Limit support - [ceshihao](https://github.com/ceshihao) -- [#161](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/161) Retry on server stream call - [lonnblad](https://github.com/lonnblad) -- [#152](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/152) Exponential backoff functions - [polyfloyd](https://github.com/polyfloyd) -- [#147](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/147) Jaeger support for tags extraction - [vporoshok](https://github.com/vporoshok) -- [#184](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/184) ctxTags identifies if the call was sampled - -### Deprecated -- [#201](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/201) `golang.org/x/net/context` - [houz42](https://github.com/houz42) -- [#183](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/183) Documentation Generation in favour of . - -### Fixed -- [172](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/172) Passing ctx into retry and recover - [johanbrandhorst](https://github.com/johanbrandhorst) -- Numerous documentation fixes. 
- -## v1.0.0 - 2018-05-08 -### Added -- grpc_auth -- grpc_ctxtags -- grpc_zap -- grpc_logrus -- grpc_opentracing -- grpc_retry -- grpc_validator -- grpc_recovery - -[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.1.0...HEAD -[v1.1.0]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...v1.1.0 diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/CONTRIBUTING.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/CONTRIBUTING.md index da51fa2c05..adf9e43d5a 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/CONTRIBUTING.md +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/CONTRIBUTING.md @@ -7,8 +7,9 @@ Fork, then clone the repo: ```bash git clone git@github.com:your-username/go-grpc-middleware.git ``` -Before submitting a patch, please make sure to run the following make commands to execute the -formatting check, regenerate the proto files, and run the tests and linters: + +Before submitting a patch, please make sure to run the following make commands to execute the formatting check, regenerate the proto files, and run the tests and linters: + ```powershell make fmt : Run formatting across all go files @@ -27,5 +28,4 @@ make all This will `lint`, `fmt`, regenerate proto files and documentation and run all tests. - Push to your fork and open a pull request. diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT new file mode 100644 index 0000000000..3b13627cdb --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT @@ -0,0 +1,2 @@ +Copyright (c) The go-grpc-middleware Authors. +Licensed under the Apache License 2.0. diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/Makefile b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/Makefile index 1868f27c02..9ed35dc87d 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/Makefile +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/Makefile @@ -1,17 +1,15 @@ include .bingo/Variables.mk -SHELL=/bin/bash +SHELL=/usr/bin/env bash -PROVIDER_MODULES ?= $(shell ls -d $(PWD)/providers/*) -MODULES ?= $(PROVIDER_MODULES) $(PWD)/ +PROVIDER_MODULES ?= $(shell find $(PWD)/providers/ -name "go.mod" | grep -v ".bingo" | xargs dirname) +MODULES ?= $(PROVIDER_MODULES) $(PWD) $(PWD)/examples +GO_FILES_TO_FMT ?= $(shell find . -path -prune -o -name '*.go' -print) GOBIN ?= $(firstword $(subst :, ,${GOPATH}))/bin -PROTOC_VERSION ?= 3.12.3 -PROTOC ?= $(GOBIN)/protoc-$(PROTOC_VERSION) TMP_GOPATH ?= /tmp/gopath - GO111MODULE ?= on export GO111MODULE GOPROXY ?= https://proxy.golang.org @@ -40,26 +38,40 @@ all: fmt proto lint test .PHONY: fmt fmt: $(GOIMPORTS) - @echo "Running fmt for all modules: $(MODULES)" - @$(GOIMPORTS) -local github.com/grpc-ecosystem/go-grpc-middleware/v2 -w $(MODULES) - -.PHONY: proto -proto: ## Generates Go files from Thanos proto files. 
-proto: $(GOIMPORTS) $(PROTOC) $(PROTOC_GEN_GOGOFAST) ./grpctesting/testpb/test.proto - @GOIMPORTS_BIN="$(GOIMPORTS)" PROTOC_BIN="$(PROTOC)" PROTOC_GEN_GOGOFAST_BIN="$(PROTOC_GEN_GOGOFAST)" scripts/genproto.sh + @echo ">> formatting go code" + @gofmt -s -w $(GO_FILES_TO_FMT) + @for file in $(GO_FILES_TO_FMT) ; do \ + ./goimports.sh "$${file}"; \ + done + @$(GOIMPORTS) -w $(GO_FILES_TO_FMT) .PHONY: test test: @echo "Running tests for all modules: $(MODULES)" + $(MAKE) $(MODULES:%=test_module_%) + +.PHONY: test_module_% +$(MODULES:%=test_module_%): test_module_%: + @echo "Running tests for dir: $*" + cd $* && go test -v -race ./... + +.PHONY: deps +deps: + @echo "Running deps tidy for all modules: $(MODULES)" for dir in $(MODULES) ; do \ - $(MAKE) test_module DIR=$${dir} ; \ + echo "$${dir}"; \ + cd $${dir} && go mod tidy; \ done - ./scripts/test_all.sh -.PHONY: test_module -test_module: - @echo "Running tests for dir: $(DIR)" - cd $(DIR) && go test -v -race ./... +.PHONY: docs +docs: $(MDOX) ## Generates code snippets, doc formatting and check links. + @echo ">> generating docs $(PATH)" + @$(MDOX) fmt -l --links.validate.config-file=$(MDOX_VALIDATE_CONFIG) *.md + +.PHONY: check-docs +check-docs: $(MDOX) ## Generates code snippets and doc formatting and checks links. + @echo ">> checking docs $(PATH)" + @$(MDOX) fmt --check -l --links.validate.config-file=$(MDOX_VALIDATE_CONFIG) *.md .PHONY: lint # PROTIP: @@ -68,24 +80,59 @@ test_module: # --mem-profile-path string Path to memory profile output file # to debug big allocations during linting. lint: ## Runs various static analysis tools against our code. -lint: fmt $(FAILLINT) $(GOLANGCI_LINT) $(MISSPELL) +lint: $(BUF) $(COPYRIGHT) fmt docs + @echo ">> lint proto files" + @$(BUF) lint + + @echo ">> ensuring copyright headers" + @$(COPYRIGHT) $(shell go list -f "{{.Dir}}" ./... | xargs -i find "{}" -name "*.go") + @$(call require_clean_work_tree,"set copyright headers") + @echo ">> ensured all .go files have copyright headers" + @echo "Running lint for all modules: $(MODULES)" - ./scripts/git-tree.sh + @$(call require_clean_work_tree,"before lint") + $(MAKE) $(MODULES:%=lint_module_%) + @$(call require_clean_work_tree,"lint and format files") + +.PHONY: lint_module_% +# PROTIP: +# Add +# --cpu-profile-path string Path to CPU profile output file +# --mem-profile-path string Path to memory profile output file +# to debug big allocations during linting. +lint_module_%: ## Runs various static analysis against our code. +$(MODULES:%=lint_module_%): lint_module_%: $(FAILLINT) $(GOLANGCI_LINT) $(MISSPELL) @echo ">> verifying modules being imported" - @$(FAILLINT) -paths "errors=github.com/pkg/errors,fmt.{Print,Printf,Println}" ./... + @cd $* && $(FAILLINT) -paths "fmt.{Print,Printf,Println},github.com/golang/protobuf=google.golang.org/protobuf" ./... + @echo ">> examining all of the Go files" - @go vet -stdmethods=false ./... + @cd $* && go vet -stdmethods=false ./... + @echo ">> linting all of the Go files GOGC=${GOGC}" - @$(GOLANGCI_LINT) run - @echo ">> detecting misspells" - @find . -type f | grep -v vendor/ | grep -vE '\./\..*' | xargs $(MISSPELL) -error - @echo ">> ensuring generated proto files are up to date" - @$(MAKE) proto - ./scripts/git-tree.sh -$(PROTOC): + @cd $* && $(GOLANGCI_LINT) run + @$(call require_clean_work_tree,"golangci lint") + + +# For protoc naming matters. 
+PROTOC_GEN_GO_CURRENT := $(TMP_GOPATH)/protoc-gen-go +PROTOC_GEN_GO_GRPC_CURRENT := $(TMP_GOPATH)/protoc-gen-go-grpc +PROTO_TEST_DIR := testing/testpb/v1 + +.PHONY: proto +proto: ## Generate testing protobufs +proto: $(BUF) $(PROTOC_GEN_GO) $(PROTOC_GEN_GO_GRPC) $(PROTO_TEST_DIR)/test.proto @mkdir -p $(TMP_GOPATH) - @echo ">> fetching protoc@${PROTOC_VERSION}" - @PROTOC_VERSION="$(PROTOC_VERSION)" TMP_GOPATH="$(TMP_GOPATH)" scripts/installprotoc.sh - @echo ">> installing protoc@${PROTOC_VERSION}" - @mv -- "$(TMP_GOPATH)/bin/protoc" "$(GOBIN)/protoc-$(PROTOC_VERSION)" - @echo ">> produced $(GOBIN)/protoc-$(PROTOC_VERSION)" + @cp $(PROTOC_GEN_GO) $(PROTOC_GEN_GO_CURRENT) + @cp $(PROTOC_GEN_GO_GRPC) $(PROTOC_GEN_GO_GRPC_CURRENT) + @echo ">> generating $(PROTO_TEST_DIR)" + @PATH=$(GOBIN):$(TMP_GOPATH) $(BUF) alpha protoc \ + -I $(PROTO_TEST_DIR) \ + --go_out=$(PROTO_TEST_DIR)/../ \ + --go-grpc_out=$(PROTO_TEST_DIR)/../ \ + $(PROTO_TEST_DIR)/*.proto + +.PHONY: buf.gen +buf.gen: + @$(BUF) generate \ + --template ./testing/testvalidate/testvalidate.buf.gen.yaml \ + --path ./testing/testvalidate/v1 diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/README.md index a88372b333..8c4ac16bcf 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/README.md +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/README.md @@ -1,86 +1,129 @@ -# Go gRPC Middleware V2 +# Go gRPC Middleware -[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware.svg?branch=master)](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware) -[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-middleware)](https://goreportcard.com/report/github.com/grpc-ecosystem/go-grpc-middleware) -[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware) -[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/?badge) -[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware) -[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) -[![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status) -[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE) +[![go](https://github.com/grpc-ecosystem/go-grpc-middleware/workflows/go/badge.svg?branch=v2)](https://github.com/grpc-ecosystem/go-grpc-middleware/actions?query=branch%3Av2) [![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-middleware)](https://goreportcard.com/report/github.com/grpc-ecosystem/go-grpc-middleware) [![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/v2) [![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) [![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://gophers.slack.com/archives/CNJL30P4P) -[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities. - -**NOTE: V2 is under development. 
If you want to be up to date, or better (!) help us improve go-grpc-middleware please follow on https://github.com/grpc-ecosystem/go-grpc-middleware/issues/275.** +This repository holds [gRPC Go](https://github.com/grpc/grpc-go) Middlewares: interceptors, helpers and utilities. ## Middleware -[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for -Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs) -that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client either around the user call. It is a perfect way to implement -common patterns: auth, logging, message, validation, retries or monitoring. - -These are generic building blocks that make it easy to build multiple microservices easily. -The purpose of this repository is to act as a go-to point for such reusable functionality. It contains -some of them itself, but also will link to useful external repos. - -`middleware` itself provides support for chaining interceptors, here's an example: - -```go -import "github.com/grpc-ecosystem/go-grpc-middleware/v2" - -myServer := grpc.NewServer( - grpc.StreamInterceptor(middleware.ChainStreamServer( - tags.StreamServerInterceptor(), - opentracing.StreamServerInterceptor(), - prometheus.StreamServerInterceptor, - zap.StreamServerInterceptor(zapLogger), - auth.StreamServerInterceptor(myAuthFunction), - recovery.StreamServerInterceptor(), - )), - grpc.UnaryInterceptor(middleware.ChainUnaryServer( - tags.UnaryServerInterceptor(), - opentracing.UnaryServerInterceptor(), - prometheus.UnaryServerInterceptor, - zap.UnaryServerInterceptor(zapLogger), - auth.UnaryServerInterceptor(myAuthFunction), - recovery.UnaryServerInterceptor(), - )), -) +[gRPC Go](https://github.com/grpc/grpc-go) has support for "interceptors", i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs) that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client around the user call. It is a perfect way to implement common patterns: auth, logging, tracing, metrics, validation, retries, rate limiting and more, which serve as generic building blocks that make it easy to build multiple microservices. + +Especially for observability signals (logging, tracing, metrics), interceptors offer semi-automatic instrumentation that improves the consistency of your observability and enables powerful correlation techniques (e.g. exemplars and trace IDs in logs), as demoed in [examples](examples). + +This repository offers ready-to-use middlewares that implement gRPC interceptors, with examples. In some cases dedicated projects offer great interceptors, so this repository skips those, and we link them in the [interceptors](#interceptors) list. + +> NOTE: Some middlewares are quite simple to write, so feel free to use this repo as a template if you need. It's ok to copy some simpler interceptors if you need more flexibility. This repo can't support all the edge cases you might have. + +Another great feature of interceptors is that they can be chained. For example, below is a server-side chain of interceptors with full observability correlation, auth and panic recovery; a sketch of the logging adapter it assumes follows the snippet: + +```go mdox-exec="sed -n '136,151p' examples/server/main.go" + grpcSrv := grpc.NewServer( + grpc.ChainUnaryInterceptor( + // Order matters e.g. tracing interceptor has to create the span first for the later exemplars to work. + otelgrpc.UnaryServerInterceptor(), + srvMetrics.UnaryServerInterceptor(grpcprom.WithExemplarFromContext(exemplarFromContext)), + logging.UnaryServerInterceptor(interceptorLogger(rpcLogger), logging.WithFieldsFromContext(logTraceID)), + selector.UnaryServerInterceptor(auth.UnaryServerInterceptor(authFn), selector.MatchFunc(allButHealthZ)), + recovery.UnaryServerInterceptor(recovery.WithRecoveryHandler(grpcPanicRecoveryHandler)), + ), + grpc.ChainStreamInterceptor( + otelgrpc.StreamServerInterceptor(), + srvMetrics.StreamServerInterceptor(grpcprom.WithExemplarFromContext(exemplarFromContext)), + logging.StreamServerInterceptor(interceptorLogger(rpcLogger), logging.WithFieldsFromContext(logTraceID)), + selector.StreamServerInterceptor(auth.StreamServerInterceptor(authFn), selector.MatchFunc(allButHealthZ)), + recovery.StreamServerInterceptor(recovery.WithRecoveryHandler(grpcPanicRecoveryHandler)), + ), ```
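The snippet above relies on an `interceptorLogger(rpcLogger)` helper that the example program defines itself; it is not exported by the library. A minimal sketch of such an adapter for `slog`, modeled on the adapters shipped in `interceptors/logging/examples` (the direct level conversion assumes the middleware's logging levels line up with slog's):

```go
package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging"
)

// interceptorLogger adapts an *slog.Logger to the logging.Logger interface
// the middleware expects; the function name mirrors the snippet above.
func interceptorLogger(l *slog.Logger) logging.Logger {
	return logging.LoggerFunc(func(ctx context.Context, lvl logging.Level, msg string, fields ...any) {
		l.Log(ctx, slog.Level(lvl), msg, fields...)
	})
}

func main() {
	rpcLogger := slog.New(slog.NewJSONHandler(os.Stderr, nil))
	_ = interceptorLogger(rpcLogger) // pass to logging.UnaryServerInterceptor(...)
}
```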
+This pattern offers clean and explicit shared functionality for all your gRPC methods. Full, buildable examples can be found in the [examples](examples) directory. + ## Interceptors -*Please send a PR to add new interceptors or middleware to this list* +This list covers known interceptors that users use for their Go microservices (both in this repo and external). Click on each to see extended examples in `examples_test.go` (also available in [pkg.go.dev](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/v2)). + +All paths should work with `go get <path>`. #### Auth - * [`auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware -#### Logging - * [`tags`](interceptors/tags) - a library that adds a `Tag` map to context, with data populated from request body - * [`zap`](providers/zap) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers. - * [`logrus`](providers/logrus) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers. - * [`kit`](providers/kit) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers. - * [`zerolog`](providers/zerolog) - integration of [zerolog](https://github.com/rs/zerolog) logging Library into gRPC handlers. +* [`github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/auth`](interceptors/auth) - a customizable (via `AuthFunc`) piece of auth middleware. +* (external) [`google.golang.org/grpc/authz`](https://github.com/grpc/grpc-go/blob/master/authz/grpc_authz_server_interceptors.go) - a more complex piece of auth middleware, customizable via auth policies (RBAC-like). -#### Monitoring - * [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware - * [`opentracing`](interceptors/tracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags +#### Observability + +* Metrics: + * [`github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus`⚡](providers/prometheus) - Prometheus client-side and server-side monitoring middleware. Supports exemplars. Moved from the now-deprecated [`go-grpc-prometheus`](https://github.com/grpc-ecosystem/go-grpc-prometheus).
+ * (external) [`go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc`](https://go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc) - official OpenTelemetry interceptors (metric and tracing). +* Logging with [`github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging`](interceptors/logging) - a customizable logging middleware offering extended per-request logging. It requires a logging adapter; see examples in [`interceptors/logging/examples`](interceptors/logging/examples) for `go-kit`, `log`, `logr`, `logrus`, `slog`, `zap` and `zerolog`. + * NOTE: Interceptors with [context](https://pkg.go.dev/context) field injections need to be chained before the adapter function. +* Tracing: + * (external) [`go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc`](https://go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc) - official OpenTelemetry interceptors (metric and tracing) as used in [example](examples). + * (external) [`github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing`](https://pkg.go.dev/github.com/grpc-ecosystem/go-grpc-middleware@v1.4.0/tracing/opentracing) - deprecated [OpenTracing](http://opentracing.io/) client-side and server-side interceptors, if you still need them! #### Client + +* [`github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry`](interceptors/retry) - a generic gRPC response code retry mechanism, client-side middleware. + * NOTE: grpc-go also has native retries with advanced policies (https://github.com/grpc/grpc-go/blob/v1.54.0/examples/features/retry/client/main.go) +* [`github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/timeout`](interceptors/timeout) - a generic gRPC request timeout, client-side middleware; see the wiring sketch after this list.
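A client-side wiring sketch for the two client middlewares above. The target address, deadline and retry count are arbitrary, and the interceptor signatures are assumed from the packages as listed; `grpc.WithChainUnaryInterceptor` is the client analogue of the server-side `grpc.ChainUnaryInterceptor` used earlier:

```go
package main

import (
	"log"
	"time"

	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry"
	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/timeout"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:8080",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithChainUnaryInterceptor(
			timeout.UnaryClientInterceptor(500*time.Millisecond), // per-call deadline
			retry.UnaryClientInterceptor(retry.WithMax(3)),       // retry transient failures
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```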
#### Server +* [`github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/validator`](interceptors/validator) - codegen inbound message validation from `.proto` options. +* [`github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery`](interceptors/recovery) - turn panics into gRPC errors (make sure to use it as the "last" interceptor, so a panic does not skip other interceptors). +* [`github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/ratelimit`](interceptors/ratelimit) - gRPC rate limiting by your own limiter. +* [`github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/protovalidate`](interceptors/protovalidate) - message validation from `.proto` options via [protovalidate-go](https://github.com/bufbuild/protovalidate). + +#### Filtering Interceptor + +* [`github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/selector`](interceptors/selector) - allows users to select one or more interceptors to apply under a given condition, such as matching the service method. + +## Prerequisites + +- **[Go](https://golang.org)**: Any one of the **three latest major** [releases](https://golang.org/doc/devel/release.html) is supported. + +## Structure of this repository + +The main interceptors are available in the subdirectories of the [`interceptors` directory](interceptors) e.g. [`interceptors/validator`](interceptors/validator), [`interceptors/auth`](interceptors/auth) or [`interceptors/logging`](interceptors/logging). + +Some interceptors, or utilities for interceptors, require opinionated code that pulls in a larger number of dependencies. Those are placed in the `providers` directory as separate Go modules, with separate versioning. For example [`providers/prometheus`](providers/prometheus) offers metrics middleware (there is no "interceptor/metrics" at the moment). A separate module might be a little harder to discover and version in your `go.mod`, but it allows the core interceptors to stay ultra slim in terms of dependencies. + +The [`interceptors` directory](interceptors) also holds generic interceptors that accept a [`Reporter`](interceptors/reporter.go) interface, which makes it easy to create your own middlewares. + +As you might notice, this repository contains multiple modules with different versions ([Go Module specifics](https://github.com/golang/go/wiki/Modules#faqs--multi-module-repositories)). Refer to [versions.yaml](versions.yaml) for current modules. The main module is version 2.x.y, and the providers modules have lower versions. Since the main module is v2, its module path ends with `v2`: + +``` +go get github.com/grpc-ecosystem/go-grpc-middleware/v2/ +``` + +Since the providers modules and packages are v1, no version is added to the path, e.g. + +``` +go get github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus +``` + +## Changes compared to v1 + +[go-grpc-middleware v1](https://pkg.go.dev/github.com/grpc-ecosystem/go-grpc-middleware) was created around 2015 and became a popular choice for gRPC users. However, much has changed since then. The main changes in v2 compared to v1: + +* A path for separate, multiple Go modules in "providers". This allows specific providers for certain middlewares to be added in the future if needed, and lets interceptors be extended without pulling dependency hell into the core framework (e.g. if you use some other metric provider, do you want to import prometheus?). This allows greater extensibility. +* Loggers are removed. The [`interceptors/logging`](interceptors/logging) package got simplified, and writing an adapter for each logger is straightforward. For convenience, we will maintain examples for popular providers in [`interceptors/logging/examples`](interceptors/logging/examples), but those are meant to be copied, not imported. +* `grpc_opentracing` interceptor was removed. This is because tracing instrumentation evolved. OpenTracing is deprecated and OpenTelemetry now has a [superior tracing interceptor](https://go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc). +* `grpc_ctxtags` interceptor was removed. Custom tags can be added to logging fields using `logging.InjectFields`. The proto option to add a logging field was clunky in practice and we don't see any use of it nowadays, so it was removed. +* One of the most powerful interceptors was imported from https://github.com/grpc-ecosystem/go-grpc-prometheus (the repo is now deprecated). This consolidation allows easier maintenance, easier use and a consistent API. +* Chain interceptors were removed, because `grpc` implemented its own. +* Moved to the new proto API (google.golang.org/protobuf). +* All "deciders", i.e. functions that decide what to do based on gRPC service name and method (aka "fullMethodName"), are removed (!). Use the [`github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/selector`](interceptors/selector) interceptor to select which method, type or service should use which interceptor, as sketched below.
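A sketch of that selector pattern, reusing the `authFn`/`allButHealthZ` naming from the server snippet earlier; the match function is illustrative and assumes `interceptors.CallMeta` exposes the full method name:

```go
package main

import (
	"context"

	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors"
	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/auth"
	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/selector"
	"google.golang.org/grpc"
)

// allButHealthZ matches every call except the standard health check.
func allButHealthZ(_ context.Context, c interceptors.CallMeta) bool {
	return c.FullMethod() != "/grpc.health.v1.Health/Check"
}

// authFn is a stub auth.AuthFunc; real code would verify credentials here.
func authFn(ctx context.Context) (context.Context, error) { return ctx, nil }

func main() {
	_ = grpc.NewServer(
		grpc.ChainUnaryInterceptor(
			// Run auth everywhere except the health check endpoint.
			selector.UnaryServerInterceptor(auth.UnaryServerInterceptor(authFn), selector.MatchFunc(allButHealthZ)),
		),
	)
}
```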
+* No more snake case package names. We now have single-word, meaningful package names. If package names collide, we recommend adding a grpc prefix, e.g. `grpcprom "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"`. +* All the options (if any) are in the form of `.With