From bfb8050910a7f873787b3c5b89bd68ef057b4761 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 5 Sep 2024 00:30:50 +0000
Subject: [PATCH] Bump github.com/onsi/ginkgo/v2 from 2.17.2 to 2.20.2

Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.17.2 to 2.20.2.
- [Release notes](https://github.com/onsi/ginkgo/releases)
- [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/onsi/ginkgo/compare/v2.17.2...v2.20.2)

---
updated-dependencies:
- dependency-name: github.com/onsi/ginkgo/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 26 +-
 go.sum | 96 +-
 .../grafana/dskit/backoff/backoff.go | 120 -
 .../grafana/dskit/multierror/multierror.go | 75 -
 .../grafana/dskit/runutil/runutil.go | 112 -
 .../github.com/grafana/e2e/.errcheck-exclude | 1 -
 vendor/github.com/grafana/e2e/.golangci.yml | 25 -
 vendor/github.com/grafana/e2e/LICENSE | 201 -
 vendor/github.com/grafana/e2e/Makefile | 14 -
 vendor/github.com/grafana/e2e/README.md | 7 -
 .../grafana/e2e/composite_service.go | 92 -
 vendor/github.com/grafana/e2e/logger.go | 44 -
 vendor/github.com/grafana/e2e/metrics.go | 158 -
 .../github.com/grafana/e2e/metrics_options.go | 58 -
 vendor/github.com/grafana/e2e/scenario.go | 270 -
 vendor/github.com/grafana/e2e/service.go | 748 ---
 vendor/github.com/grafana/e2e/util.go | 264 -
 vendor/github.com/grafana/regexp/.gitignore | 15 -
 vendor/github.com/grafana/regexp/LICENSE | 27 -
 vendor/github.com/grafana/regexp/README.md | 12 -
 vendor/github.com/grafana/regexp/backtrack.go | 365 --
 vendor/github.com/grafana/regexp/exec.go | 554 --
 vendor/github.com/grafana/regexp/onepass.go | 500 --
 vendor/github.com/grafana/regexp/regexp.go | 1304 -----
 .../grafana/regexp/syntax/compile.go | 296 --
 .../github.com/grafana/regexp/syntax/doc.go | 142 -
 .../grafana/regexp/syntax/make_perl_groups.pl | 113 -
 .../grafana/regexp/syntax/op_string.go | 52 -
 .../github.com/grafana/regexp/syntax/parse.go | 2136 --------
 .../grafana/regexp/syntax/perl_groups.go | 134 -
 .../github.com/grafana/regexp/syntax/prog.go | 349 --
 .../grafana/regexp/syntax/regexp.go | 464 --
 .../grafana/regexp/syntax/simplify.go | 151 -
 vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 57 +
 .../github.com/onsi/ginkgo/v2/CONTRIBUTING.md | 10 +-
 vendor/github.com/onsi/ginkgo/v2/Makefile | 16 +
 .../ginkgo/v2/ginkgo/build/build_command.go | 15 +-
 .../onsi/ginkgo/v2/ginkgo/internal/compile.go | 14 +-
 .../ginkgo/v2/ginkgo/watch/package_hash.go | 9 +
 .../onsi/ginkgo/v2/internal/suite.go | 7 +-
 .../ginkgo/v2/reporters/default_reporter.go | 10 +-
 .../onsi/ginkgo/v2/reporters/junit_report.go | 1 +
 vendor/github.com/onsi/ginkgo/v2/table_dsl.go | 8 +-
 .../github.com/onsi/ginkgo/v2/types/config.go | 21 +-
 .../onsi/ginkgo/v2/types/label_filter.go | 229 +-
 .../onsi/ginkgo/v2/types/version.go | 2 +-
 vendor/github.com/onsi/gomega/CHANGELOG.md | 22 +
 vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +-
 .../gomega/matchers/have_exact_elements.go | 7 +-
 .../bipartitegraph/bipartitegraphmatching.go | 7 +
 .../github.com/prometheus/prometheus/LICENSE | 201 -
 .../github.com/prometheus/prometheus/NOTICE | 108 -
 .../prometheus/model/labels/labels.go | 489 --
 .../prometheus/model/labels/labels_common.go | 217 -
 .../model/labels/labels_dedupelabels.go | 807 ---
 .../model/labels/labels_stringlabels.go | 701 ---
 .../prometheus/model/labels/matcher.go | 170 -
 .../prometheus/model/labels/regexp.go | 942
---- .../prometheus/model/labels/sharding.go | 47 - .../model/labels/sharding_dedupelabels.go | 52 - .../model/labels/sharding_stringlabels.go | 54 - .../prometheus/model/labels/test_utils.go | 87 - .../prometheus/prometheus/prompb/README.md | 9 - .../prometheus/prometheus/prompb/buf.lock | 10 - .../prometheus/prometheus/prompb/buf.yaml | 21 - .../prometheus/prometheus/prompb/custom.go | 39 - .../prometheus/prometheus/prompb/remote.pb.go | 1702 ------- .../prometheus/prometheus/prompb/remote.proto | 88 - .../prometheus/prometheus/prompb/types.pb.go | 4440 ----------------- .../prometheus/prometheus/prompb/types.proto | 187 - .../prometheus/tsdb/errors/errors.go | 104 - vendor/golang.org/x/crypto/LICENSE | 4 +- vendor/golang.org/x/exp/LICENSE | 4 +- .../x/exp/constraints/constraints.go | 50 + vendor/golang.org/x/exp/slices/cmp.go | 44 + vendor/golang.org/x/exp/slices/slices.go | 515 ++ vendor/golang.org/x/exp/slices/sort.go | 197 + .../golang.org/x/exp/slices/zsortanyfunc.go | 479 ++ .../golang.org/x/exp/slices/zsortordered.go | 481 ++ vendor/golang.org/x/net/LICENSE | 4 +- vendor/golang.org/x/sync/LICENSE | 4 +- vendor/golang.org/x/sys/LICENSE | 4 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 1 + .../golang.org/x/sys/unix/syscall_darwin.go | 12 + vendor/golang.org/x/sys/unix/syscall_linux.go | 1 + .../golang.org/x/sys/unix/syscall_openbsd.go | 1 + .../x/sys/unix/zerrors_darwin_amd64.go | 5 + .../x/sys/unix/zerrors_darwin_arm64.go | 5 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 38 +- .../x/sys/unix/zerrors_linux_386.go | 2 + .../x/sys/unix/zerrors_linux_amd64.go | 2 + .../x/sys/unix/zerrors_linux_arm.go | 2 + .../x/sys/unix/zerrors_linux_arm64.go | 2 + .../x/sys/unix/zerrors_linux_loong64.go | 2 + .../x/sys/unix/zerrors_linux_mips.go | 2 + .../x/sys/unix/zerrors_linux_mips64.go | 2 + .../x/sys/unix/zerrors_linux_mips64le.go | 2 + .../x/sys/unix/zerrors_linux_mipsle.go | 2 + .../x/sys/unix/zerrors_linux_ppc.go | 2 + .../x/sys/unix/zerrors_linux_ppc64.go | 2 + .../x/sys/unix/zerrors_linux_ppc64le.go | 2 + .../x/sys/unix/zerrors_linux_riscv64.go | 2 + .../x/sys/unix/zerrors_linux_s390x.go | 2 + .../x/sys/unix/zerrors_linux_sparc64.go | 2 + .../x/sys/unix/zsyscall_darwin_amd64.go | 48 + .../x/sys/unix/zsyscall_darwin_amd64.s | 10 + .../x/sys/unix/zsyscall_darwin_arm64.go | 48 + .../x/sys/unix/zsyscall_darwin_arm64.s | 10 + .../golang.org/x/sys/unix/zsyscall_linux.go | 16 + .../x/sys/unix/zsyscall_openbsd_386.go | 24 + .../x/sys/unix/zsyscall_openbsd_386.s | 5 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 24 + .../x/sys/unix/zsyscall_openbsd_amd64.s | 5 + .../x/sys/unix/zsyscall_openbsd_arm.go | 24 + .../x/sys/unix/zsyscall_openbsd_arm.s | 5 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 24 + .../x/sys/unix/zsyscall_openbsd_arm64.s | 5 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 24 + .../x/sys/unix/zsyscall_openbsd_mips64.s | 5 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 24 + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 24 + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 5 + .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_loong64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 
+ .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 10 +- .../x/sys/windows/security_windows.go | 2 +- .../x/sys/windows/syscall_windows.go | 12 +- .../golang.org/x/sys/windows/types_windows.go | 71 +- .../x/sys/windows/zsyscall_windows.go | 49 +- vendor/golang.org/x/term/LICENSE | 4 +- vendor/golang.org/x/text/LICENSE | 4 +- vendor/golang.org/x/tools/LICENSE | 4 +- vendor/modules.txt | 45 +- 147 files changed, 2822 insertions(+), 19446 deletions(-) delete mode 100644 vendor/github.com/grafana/dskit/backoff/backoff.go delete mode 100644 vendor/github.com/grafana/dskit/multierror/multierror.go delete mode 100644 vendor/github.com/grafana/dskit/runutil/runutil.go delete mode 100644 vendor/github.com/grafana/e2e/.errcheck-exclude delete mode 100644 vendor/github.com/grafana/e2e/.golangci.yml delete mode 100644 vendor/github.com/grafana/e2e/LICENSE delete mode 100644 vendor/github.com/grafana/e2e/Makefile delete mode 100644 vendor/github.com/grafana/e2e/README.md delete mode 100644 vendor/github.com/grafana/e2e/composite_service.go delete mode 100644 vendor/github.com/grafana/e2e/logger.go delete mode 100644 vendor/github.com/grafana/e2e/metrics.go delete mode 100644 vendor/github.com/grafana/e2e/metrics_options.go delete mode 100644 vendor/github.com/grafana/e2e/scenario.go delete mode 100644 vendor/github.com/grafana/e2e/service.go delete mode 100644 vendor/github.com/grafana/e2e/util.go delete mode 100644 vendor/github.com/grafana/regexp/.gitignore delete mode 100644 vendor/github.com/grafana/regexp/LICENSE delete mode 100644 vendor/github.com/grafana/regexp/README.md delete mode 100644 vendor/github.com/grafana/regexp/backtrack.go delete mode 100644 vendor/github.com/grafana/regexp/exec.go delete mode 100644 vendor/github.com/grafana/regexp/onepass.go delete mode 100644 vendor/github.com/grafana/regexp/regexp.go delete mode 100644 vendor/github.com/grafana/regexp/syntax/compile.go delete mode 100644 vendor/github.com/grafana/regexp/syntax/doc.go delete mode 100644 vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl delete mode 100644 vendor/github.com/grafana/regexp/syntax/op_string.go delete mode 100644 vendor/github.com/grafana/regexp/syntax/parse.go delete mode 100644 vendor/github.com/grafana/regexp/syntax/perl_groups.go delete mode 100644 vendor/github.com/grafana/regexp/syntax/prog.go delete mode 100644 vendor/github.com/grafana/regexp/syntax/regexp.go delete mode 100644 vendor/github.com/grafana/regexp/syntax/simplify.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/Makefile delete mode 100644 vendor/github.com/prometheus/prometheus/LICENSE delete mode 100644 vendor/github.com/prometheus/prometheus/NOTICE delete mode 100644 vendor/github.com/prometheus/prometheus/model/labels/labels.go delete mode 100644 vendor/github.com/prometheus/prometheus/model/labels/labels_common.go delete mode 100644 vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go delete mode 100644 vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go delete mode 100644 vendor/github.com/prometheus/prometheus/model/labels/matcher.go delete mode 100644 vendor/github.com/prometheus/prometheus/model/labels/regexp.go delete mode 100644 vendor/github.com/prometheus/prometheus/model/labels/sharding.go delete mode 100644 
vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go delete mode 100644 vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go delete mode 100644 vendor/github.com/prometheus/prometheus/model/labels/test_utils.go delete mode 100644 vendor/github.com/prometheus/prometheus/prompb/README.md delete mode 100644 vendor/github.com/prometheus/prometheus/prompb/buf.lock delete mode 100644 vendor/github.com/prometheus/prometheus/prompb/buf.yaml delete mode 100644 vendor/github.com/prometheus/prometheus/prompb/custom.go delete mode 100644 vendor/github.com/prometheus/prometheus/prompb/remote.pb.go delete mode 100644 vendor/github.com/prometheus/prometheus/prompb/remote.proto delete mode 100644 vendor/github.com/prometheus/prometheus/prompb/types.pb.go delete mode 100644 vendor/github.com/prometheus/prometheus/prompb/types.proto delete mode 100644 vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go create mode 100644 vendor/golang.org/x/exp/slices/cmp.go create mode 100644 vendor/golang.org/x/exp/slices/slices.go create mode 100644 vendor/golang.org/x/exp/slices/sort.go create mode 100644 vendor/golang.org/x/exp/slices/zsortanyfunc.go create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go diff --git a/go.mod b/go.mod index c33ae488..95e52199 100644 --- a/go.mod +++ b/go.mod @@ -9,10 +9,9 @@ require ( github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 github.com/gorilla/mux v1.8.0 github.com/grafana/dskit v0.0.0-20231012002814-3b80e3b2a51c - github.com/grafana/e2e v0.1.1 github.com/jedib0t/go-pretty/v6 v6.4.6 - github.com/onsi/ginkgo/v2 v2.17.2 - github.com/onsi/gomega v1.33.1 + github.com/onsi/ginkgo/v2 v2.20.2 + github.com/onsi/gomega v1.34.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/common v0.55.0 @@ -39,6 +38,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect @@ -58,10 +58,9 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240528025155-186aa0362fba // indirect + github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/imdario/mergo v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -80,7 +79,6 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/exporter-toolkit v0.11.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/prometheus/prometheus v0.53.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/sercand/kuberesolver/v5 v5.1.1 // indirect github.com/soheilhy/cmux v0.1.5 // indirect @@ -95,16 +93,16 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.25.0 // indirect - 
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect - golang.org/x/net v0.27.0 // indirect + golang.org/x/crypto v0.26.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.28.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/term v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/term v0.23.0 // indirect + golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.22.0 // indirect + golang.org/x/tools v0.24.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect diff --git a/go.sum b/go.sum index f62e2ac1..4a92041f 100644 --- a/go.sum +++ b/go.sum @@ -4,28 +4,6 @@ github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bE github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/aws/aws-sdk-go-v2 v1.16.0 h1:cBAYjiiexRAg9v2z9vb6IdxAa7ef4KCtjW7w7e3GxGo= -github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= -github.com/aws/aws-sdk-go-v2/config v1.15.1 h1:hTIZFepYESYyowQUBo47lu69WSxsYqGUILY9Nu8+7pY= -github.com/aws/aws-sdk-go-v2/config v1.15.1/go.mod h1:MZHGbuW2WnqIOQQBKu2ZkhTjuutZSTnn56TDq4QyydE= -github.com/aws/aws-sdk-go-v2/credentials v1.11.0 h1:gc4Uhs80s60nmLon5Z4JXWinX2BkAGT0YROoUT8h8U4= -github.com/aws/aws-sdk-go-v2/credentials v1.11.0/go.mod h1:EdV1ZFgtZ4XM5RDHWcRWK8H+xW5duNVBqWj2oLu7tRo= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.1 h1:F9Je1nq5YXfMOv6451NHvMf6U0iTWeMnsG0MMIQoUmk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.1/go.mod h1:Yph0XsTbQ5GGZ2+mO1a03P/SO9fdX3t1nejIp2tq79g= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.7 h1:KUErSJgdqmqAPBWAp6Zx9CjL0YXfytXJeXcsWnuCM1c= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.7/go.mod h1:oB9nZcxH1cGq7NPGurVJwxrO2vmJ9mmEBayCwcAlmT8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.1 h1:feVfa9eJonhJiss7g51ikjNB2DrUzbNZNvPL8pw/54k= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.1/go.mod h1:K4vz7lRYCyLYpYAMCLObODahFgARdD3YVa0MvQte9Co= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8 h1:adr3PfiggFtqgFofAMUFCtdvwzpf3QxPES4ezK4M3iI= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8/go.mod h1:wLbQYt36AJqaRZUQiCNXzbtkNigyPfKHrotHuIDiCy8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.1 h1:B/SPX7J+Y0Yrcjv60Nhbh1gC2uBN47SfN8JYre6Mp4M= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.1/go.mod h1:2Hhr9Eh1gJzDatwACX/ozAZ/ljq5vzvPRu5cdu25tzc= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.1 h1:DyHctRsJIAWIvom1Itb4T84D2jwpIu+KIi3d0SFaswg= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.1/go.mod h1:CvFTucADIx7U/M44vjLs/ZttpQHdpxwK+62+dUGhDeY= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.1 h1:xsOtPAvHqhvQvBza5ohaUcfq1LceH2lZKMUGZJKiZiM= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.1/go.mod h1:Aq2/Qggh2oemSfyHH+EO4UBbgWG6zFCXLHYI4ILTY7w= 
-github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g= -github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/briandowns/openweathermap v0.19.0 h1:nkopLMEtZLxbZI1th6dOG6xkajpszofqf53r5K8mT9k= @@ -41,12 +19,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eclipse/paho.mqtt.golang v1.4.3 h1:2kwcUGn8seMUfWndX0hGbvH8r7crgcJguQNCyp70xik= github.com/eclipse/paho.mqtt.golang v1.4.3/go.mod h1:CSYvoAlsMkhYOXh/oKyxa8EcBci6dVkLCbo5tTC1RIE= -github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b h1:ZHiD4/yE4idlbqvAO6iYCOYRzOMRpxkW+FKasRA3tsQ= -github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= @@ -102,8 +76,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g= -github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY= @@ -114,10 +88,6 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/dskit v0.0.0-20231012002814-3b80e3b2a51c h1:botCtW4mKq6R6r2xzgL+zZpNe4M1q0KNEe/qdCyG/Zk= github.com/grafana/dskit v0.0.0-20231012002814-3b80e3b2a51c/go.mod h1:byPCvaG/pqi33Kq+Wvkp7WhLfmrlyy0RAoYG4yRh01I= -github.com/grafana/e2e v0.1.1 h1:/b6xcv5BtoBnx8cZnCiey9DbjEc8z7gXHO5edoeRYxc= -github.com/grafana/e2e v0.1.1/go.mod h1:RpNLgae5VT+BUHvPE+/zSypmOXKwEu4t+tnEMS1ATaE= -github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= -github.com/grafana/regexp 
v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -133,10 +103,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= -github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -145,14 +111,6 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= -github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= -github.com/minio/minio-go/v7 v7.0.23 h1:NleyGQvAn9VQMU+YHVrgV4CX+EPtxPt/78lHOOTncy4= -github.com/minio/minio-go/v7 v7.0.23/go.mod h1:ei5JjmxwHaMrgsMrn4U/+Nmg+d8MKS1U2DAn1ou4+Do= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -162,10 +120,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= -github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= 
-github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= @@ -189,18 +147,12 @@ github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7f github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.53.0 h1:vOnhpUKrDv954jnVBvhG/ZQJ3kqscnKI+Hbdwo2tAhc= -github.com/prometheus/prometheus v0.53.0/go.mod h1:RZDkzs+ShMBDkAPQkLEaLBXpjmDcjhNxU2drUVPgKUU= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -215,8 +167,6 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/thanos-io/objstore v0.0.0-20220809103346-8ef1f215e2bf h1:onQsPyHlq2yIWU+Nfl6yStuqnZuVQQN8FZ8sBb2wqtw= -github.com/thanos-io/objstore v0.0.0-20220809103346-8ef1f215e2bf/go.mod h1:v0NhuxxxUFUPatQcVNSCUkBEVezXzl7LSdaBOZygq98= github.com/uber/jaeger-client-go v2.29.1+incompatible h1:R9ec3zO3sGpzs0abd43Y+fBZRJ9uiH6lXyR/+u6brW4= github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= @@ -256,10 +206,10 @@ go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -270,8 +220,8 @@ golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -279,20 +229,20 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -301,8 +251,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -327,8 +277,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/vendor/github.com/grafana/dskit/backoff/backoff.go b/vendor/github.com/grafana/dskit/backoff/backoff.go deleted file mode 100644 index 7ce55647..00000000 --- a/vendor/github.com/grafana/dskit/backoff/backoff.go +++ /dev/null @@ -1,120 +0,0 @@ -package backoff - -import ( - "context" - "flag" - "fmt" - "math/rand" - "time" -) - -// Config configures a Backoff -type Config struct { - MinBackoff time.Duration `yaml:"min_period" category:"advanced"` // start backoff 
at this level - MaxBackoff time.Duration `yaml:"max_period" category:"advanced"` // increase exponentially to this level - MaxRetries int `yaml:"max_retries" category:"advanced"` // give up after this many; zero means infinite retries -} - -// RegisterFlagsWithPrefix for Config. -func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.DurationVar(&cfg.MinBackoff, prefix+".backoff-min-period", 100*time.Millisecond, "Minimum delay when backing off.") - f.DurationVar(&cfg.MaxBackoff, prefix+".backoff-max-period", 10*time.Second, "Maximum delay when backing off.") - f.IntVar(&cfg.MaxRetries, prefix+".backoff-retries", 10, "Number of times to backoff and retry before failing.") -} - -// Backoff implements exponential backoff with randomized wait times -type Backoff struct { - cfg Config - ctx context.Context - numRetries int - nextDelayMin time.Duration - nextDelayMax time.Duration -} - -// New creates a Backoff object. Pass a Context that can also terminate the operation. -func New(ctx context.Context, cfg Config) *Backoff { - return &Backoff{ - cfg: cfg, - ctx: ctx, - nextDelayMin: cfg.MinBackoff, - nextDelayMax: doubleDuration(cfg.MinBackoff, cfg.MaxBackoff), - } -} - -// Reset the Backoff back to its initial condition -func (b *Backoff) Reset() { - b.numRetries = 0 - b.nextDelayMin = b.cfg.MinBackoff - b.nextDelayMax = doubleDuration(b.cfg.MinBackoff, b.cfg.MaxBackoff) -} - -// Ongoing returns true if caller should keep going -func (b *Backoff) Ongoing() bool { - // Stop if Context has errored or max retry count is exceeded - return b.ctx.Err() == nil && (b.cfg.MaxRetries == 0 || b.numRetries < b.cfg.MaxRetries) -} - -// Err returns the reason for terminating the backoff, or nil if it didn't terminate -func (b *Backoff) Err() error { - if b.ctx.Err() != nil { - return b.ctx.Err() - } - if b.cfg.MaxRetries != 0 && b.numRetries >= b.cfg.MaxRetries { - return fmt.Errorf("terminated after %d retries", b.numRetries) - } - return nil -} - -// NumRetries returns the number of retries so far -func (b *Backoff) NumRetries() int { - return b.numRetries -} - -// Wait sleeps for the backoff time then increases the retry count and backoff time -// Returns immediately if Context is terminated -func (b *Backoff) Wait() { - // Increase the number of retries and get the next delay - sleepTime := b.NextDelay() - - if b.Ongoing() { - timer := time.NewTimer(sleepTime) - defer timer.Stop() - - select { - case <-b.ctx.Done(): - case <-timer.C: - } - } -} - -func (b *Backoff) NextDelay() time.Duration { - b.numRetries++ - - // Handle the edge case where the min and max have the same value - // (or due to some misconfig max is < min) - if b.nextDelayMin >= b.nextDelayMax { - return b.nextDelayMin - } - - // Add a jitter within the next exponential backoff range - sleepTime := b.nextDelayMin + time.Duration(rand.Int63n(int64(b.nextDelayMax-b.nextDelayMin))) - - // Apply the exponential backoff to calculate the next jitter - // range, unless we've already reached the max - if b.nextDelayMax < b.cfg.MaxBackoff { - b.nextDelayMin = doubleDuration(b.nextDelayMin, b.cfg.MaxBackoff) - b.nextDelayMax = doubleDuration(b.nextDelayMax, b.cfg.MaxBackoff) - } - - return sleepTime -} - -func doubleDuration(value time.Duration, max time.Duration) time.Duration { - value = value * 2 - - if value <= max { - return value - } - - return max -} diff --git a/vendor/github.com/grafana/dskit/multierror/multierror.go b/vendor/github.com/grafana/dskit/multierror/multierror.go deleted file mode 100644 index 
68b73e20..00000000 --- a/vendor/github.com/grafana/dskit/multierror/multierror.go +++ /dev/null @@ -1,75 +0,0 @@ -// Provenance-includes-location: https://github.com/thanos-io/thanos/blob/2027fb30/pkg/errutil/multierror.go -// Provenance-includes-copyright: The Thanos Authors. - -package multierror - -import ( - "bytes" - "errors" - "fmt" -) - -// MultiError implements the error interface, and contains the errors used to construct it. -type MultiError []error - -// Add adds the error to the error list if it is not nil. -func (es *MultiError) Add(err error) { - if err == nil { - return - } - if merr, ok := err.(nonNilMultiError); ok { - *es = append(*es, merr...) - } else { - *es = append(*es, err) - } -} - -// Err returns the error list as an error or nil if it is empty. -func (es MultiError) Err() error { - if len(es) == 0 { - return nil - } - return nonNilMultiError(es) -} - -// New returns a new MultiError containing supplied errors. -func New(errs ...error) MultiError { - merr := MultiError{} - for _, err := range errs { - merr.Add(err) - } - - return merr -} - -type nonNilMultiError MultiError - -// Error returns a concatenated string of the contained errors. -func (es nonNilMultiError) Error() string { - var buf bytes.Buffer - - if len(es) > 1 { - fmt.Fprintf(&buf, "%d errors: ", len(es)) - } - - for i, err := range es { - if i != 0 { - buf.WriteString("; ") - } - buf.WriteString(err.Error()) - } - - return buf.String() -} - -// Is attempts to match the provided error against errors in the error list. -// -// This function allows errors.Is to traverse the values stored in the MultiError. -func (es nonNilMultiError) Is(target error) bool { - for _, err := range es { - if errors.Is(err, target) { - return true - } - } - return false -} diff --git a/vendor/github.com/grafana/dskit/runutil/runutil.go b/vendor/github.com/grafana/dskit/runutil/runutil.go deleted file mode 100644 index 36c15f61..00000000 --- a/vendor/github.com/grafana/dskit/runutil/runutil.go +++ /dev/null @@ -1,112 +0,0 @@ -// Provenance-includes-location: https://github.com/thanos-io/thanos/blob/main/pkg/runutil/runutil.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: The Thanos Authors. - -package runutil - -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - - "github.com/grafana/dskit/multierror" -) - -// CloseWithErrCapture closes closer and wraps any error with the provided message and assigns it to err. -func CloseWithErrCapture(err *error, closer io.Closer, format string, a ...interface{}) { - merr := multierror.MultiError{} - - merr.Add(*err) - merr.Add(errors.Wrapf(closer.Close(), format, a...)) - - *err = merr.Err() -} - -// CloseWithLogOnErr closes an io.Closer and logs any relevant error from it wrapped with the provided format string and -// args. -func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, args ...interface{}) { - err := closer.Close() - if err == nil || errors.Is(err, os.ErrClosed) { - return - } - - msg := fmt.Sprintf(format, args...) - level.Warn(logger).Log("msg", "detected close error", "err", fmt.Sprintf("%s: %s", msg, err.Error())) -} - -// ExhaustCloseWithErrCapture closes the io.ReadCloser with error capture but exhausts the reader before. -func ExhaustCloseWithErrCapture(err *error, r io.ReadCloser, format string, a ...interface{}) { - _, copyErr := io.Copy(io.Discard, r) - - CloseWithErrCapture(err, r, format, a...) 
- - // Prepend the io.Copy error. - merr := multierror.MultiError{} - merr.Add(copyErr) - merr.Add(*err) - - *err = merr.Err() -} - -// DeleteAll deletes all files and directories inside the given -// dir except for the ignoreDirs directories. -// NOTE: DeleteAll is not idempotent. -func DeleteAll(dir string, ignoreDirs ...string) error { - entries, err := os.ReadDir(dir) - if os.IsNotExist(err) { - return nil - } - if err != nil { - return errors.Wrap(err, "read dir") - } - var groupErrs multierror.MultiError - - var matchingIgnores []string - for _, d := range entries { - if !d.IsDir() { - if err := os.RemoveAll(filepath.Join(dir, d.Name())); err != nil { - groupErrs.Add(err) - } - continue - } - - // ignoreDirs might be multi-directory paths. - matchingIgnores = matchingIgnores[:0] - ignore := false - for _, ignoreDir := range ignoreDirs { - id := strings.Split(ignoreDir, "/") - if id[0] == d.Name() { - if len(id) == 1 { - ignore = true - break - } - matchingIgnores = append(matchingIgnores, filepath.Join(id[1:]...)) - } - } - - if ignore { - continue - } - - if len(matchingIgnores) == 0 { - if err := os.RemoveAll(filepath.Join(dir, d.Name())); err != nil { - groupErrs.Add(err) - } - continue - } - if err := DeleteAll(filepath.Join(dir, d.Name()), matchingIgnores...); err != nil { - groupErrs.Add(err) - } - } - - if groupErrs.Err() != nil { - return errors.Wrap(groupErrs.Err(), "delete file/dir") - } - return nil -} diff --git a/vendor/github.com/grafana/e2e/.errcheck-exclude b/vendor/github.com/grafana/e2e/.errcheck-exclude deleted file mode 100644 index 306a6f62..00000000 --- a/vendor/github.com/grafana/e2e/.errcheck-exclude +++ /dev/null @@ -1 +0,0 @@ -(github.com/go-kit/log.Logger).Log diff --git a/vendor/github.com/grafana/e2e/.golangci.yml b/vendor/github.com/grafana/e2e/.golangci.yml deleted file mode 100644 index 2b061157..00000000 --- a/vendor/github.com/grafana/e2e/.golangci.yml +++ /dev/null @@ -1,25 +0,0 @@ -output: - format: line-number - -linters: - enable: - - goimports - - gofmt - - misspell - - revive - -linters-settings: - errcheck: - # path to a file containing a list of functions to exclude from checking - # see https://github.com/kisielk/errcheck#excluding-functions for details - exclude: ./.errcheck-exclude - goimports: - local-prefixes: "github.com/grafana/e2e" - -run: - timeout: 5m - - # List of build tags, all linters use it. - build-tags: - - netgo - - requires_docker diff --git a/vendor/github.com/grafana/e2e/LICENSE b/vendor/github.com/grafana/e2e/LICENSE deleted file mode 100644 index 91b07fe8..00000000 --- a/vendor/github.com/grafana/e2e/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2021 Grafana Labs - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
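(Aside on the dskit helpers removed above: multierror.MultiError and runutil.CloseWithErrCapture are small error-aggregation utilities that the now-dropped grafana/e2e code relied on. The sketch below shows, under stated assumptions, how they are typically combined; the function names, paths, and file handling are illustrative and are not taken from this patch, only the imported APIs are.)

```go
// Illustrative sketch only - the helper names and file handling here are
// assumptions; only the multierror and runutil APIs come from the files
// deleted in this patch.
package dskitexample

import (
	"os"

	"github.com/grafana/dskit/multierror"
	"github.com/grafana/dskit/runutil"
)

// checkFiles collects one error per path instead of stopping at the first failure.
func checkFiles(paths ...string) error {
	errs := multierror.New()
	for _, p := range paths {
		errs.Add(checkOne(p)) // Add ignores nil errors and flattens nested MultiErrors.
	}
	return errs.Err() // nil when nothing was added
}

// checkOne folds any Close error into the named return value via
// CloseWithErrCapture, so a failed Close is not silently dropped.
func checkOne(path string) (err error) {
	f, openErr := os.Open(path)
	if openErr != nil {
		return openErr
	}
	defer runutil.CloseWithErrCapture(&err, f, "close %s", path)

	// ... read and validate f here ...
	return nil
}
```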
diff --git a/vendor/github.com/grafana/e2e/Makefile b/vendor/github.com/grafana/e2e/Makefile deleted file mode 100644 index 8de8ac90..00000000 --- a/vendor/github.com/grafana/e2e/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -.PHONY: test -test: - go test -tags netgo -timeout 30m -race -count 1 ./... - -.PHONY: lint -lint: - go run github.com/client9/misspell/cmd/misspell@v0.3.4 -error README.md LICENSE - - # Configured via .golangci.yml. - go run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.43.0 run - -.PHONY: integration -integration: - go test -tags netgo,requires_docker -timeout 30m -v -count=1 ./ diff --git a/vendor/github.com/grafana/e2e/README.md b/vendor/github.com/grafana/e2e/README.md deleted file mode 100644 index 527e58d6..00000000 --- a/vendor/github.com/grafana/e2e/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Grafana E2E - -This library contains utilities that are useful for running E2E tests using docker containers. - -## License - -[Apache 2.0 License](https://github.com/grafana/dskit/blob/main/LICENSE) diff --git a/vendor/github.com/grafana/e2e/composite_service.go b/vendor/github.com/grafana/e2e/composite_service.go deleted file mode 100644 index db840ac9..00000000 --- a/vendor/github.com/grafana/e2e/composite_service.go +++ /dev/null @@ -1,92 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "time" - - "github.com/grafana/dskit/backoff" - "github.com/pkg/errors" -) - -// CompositeHTTPService abstract an higher-level service composed, under the hood, -// by 2+ HTTPService. -type CompositeHTTPService struct { - services []*HTTPService - - // Generic retry backoff. - retryBackoff *backoff.Backoff -} - -func NewCompositeHTTPService(services ...*HTTPService) *CompositeHTTPService { - return &CompositeHTTPService{ - services: services, - retryBackoff: backoff.New(context.Background(), backoff.Config{ - MinBackoff: 300 * time.Millisecond, - MaxBackoff: 600 * time.Millisecond, - MaxRetries: 50, // Sometimes the CI is slow ¯\_(ツ)_/¯ - }), - } -} - -func (s *CompositeHTTPService) NumInstances() int { - return len(s.services) -} - -func (s *CompositeHTTPService) Instances() []*HTTPService { - return s.services -} - -// WaitSumMetrics waits for at least one instance of each given metric names to be present and their sums, returning true -// when passed to given isExpected(...). -func (s *CompositeHTTPService) WaitSumMetrics(isExpected func(sums ...float64) bool, metricNames ...string) error { - return s.WaitSumMetricsWithOptions(isExpected, metricNames) -} - -func (s *CompositeHTTPService) WaitSumMetricsWithOptions(isExpected func(sums ...float64) bool, metricNames []string, opts ...MetricsOption) error { - var ( - sums []float64 - err error - options = buildMetricsOptions(opts) - ) - - for s.retryBackoff.Reset(); s.retryBackoff.Ongoing(); { - sums, err = s.SumMetrics(metricNames, opts...) - if options.WaitMissingMetrics && errors.Is(err, errMissingMetric) { - continue - } - if err != nil { - return err - } - - if isExpected(sums...) { - return nil - } - - s.retryBackoff.Wait() - } - - return fmt.Errorf("unable to find metrics %s with expected values. Last error: %v. Last values: %v", metricNames, err, sums) -} - -// SumMetrics returns the sum of the values of each given metric names. -func (s *CompositeHTTPService) SumMetrics(metricNames []string, opts ...MetricsOption) ([]float64, error) { - sums := make([]float64, len(metricNames)) - - for _, service := range s.services { - partials, err := service.SumMetrics(metricNames, opts...) 
- if err != nil { - return nil, err - } - - if len(partials) != len(sums) { - return nil, fmt.Errorf("unexpected mismatching sum metrics results (got %d, expected %d)", len(partials), len(sums)) - } - - for i := 0; i < len(sums); i++ { - sums[i] += partials[i] - } - } - - return sums, nil -} diff --git a/vendor/github.com/grafana/e2e/logger.go b/vendor/github.com/grafana/e2e/logger.go deleted file mode 100644 index 5152ed5e..00000000 --- a/vendor/github.com/grafana/e2e/logger.go +++ /dev/null @@ -1,44 +0,0 @@ -package e2e - -import ( - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/go-kit/log" -) - -// Global logger to use in integration tests. We use a global logger to simplify -// writing integration tests and avoiding having to pass the logger instance -// every time. -var logger log.Logger - -func init() { - logger = NewLogger(os.Stdout) -} - -type Logger struct { - w io.Writer -} - -func NewLogger(w io.Writer) *Logger { - return &Logger{ - w: w, - } -} - -func (l *Logger) Log(keyvals ...interface{}) error { - log := strings.Builder{} - log.WriteString(time.Now().Format("15:04:05")) - - for _, v := range keyvals { - log.WriteString(" " + fmt.Sprint(v)) - } - - log.WriteString("\n") - - _, err := l.w.Write([]byte(log.String())) - return err -} diff --git a/vendor/github.com/grafana/e2e/metrics.go b/vendor/github.com/grafana/e2e/metrics.go deleted file mode 100644 index 18378fb4..00000000 --- a/vendor/github.com/grafana/e2e/metrics.go +++ /dev/null @@ -1,158 +0,0 @@ -package e2e - -import ( - "math" - - io_prometheus_client "github.com/prometheus/client_model/go" -) - -func getMetricValue(m *io_prometheus_client.Metric) float64 { - if m.GetGauge() != nil { - return m.GetGauge().GetValue() - } else if m.GetCounter() != nil { - return m.GetCounter().GetValue() - } else if m.GetHistogram() != nil { - return m.GetHistogram().GetSampleSum() - } else if m.GetSummary() != nil { - return m.GetSummary().GetSampleSum() - } else { - return 0 - } -} - -func getMetricCount(m *io_prometheus_client.Metric) float64 { - if m.GetHistogram() != nil { - return float64(m.GetHistogram().GetSampleCount()) - } else if m.GetSummary() != nil { - return float64(m.GetSummary().GetSampleCount()) - } else { - return 0 - } -} - -func getValues(metrics []*io_prometheus_client.Metric, opts MetricsOptions) []float64 { - values := make([]float64, 0, len(metrics)) - for _, m := range metrics { - values = append(values, opts.GetValue(m)) - } - return values -} - -func filterMetrics(metrics []*io_prometheus_client.Metric, opts MetricsOptions) []*io_prometheus_client.Metric { - // If no label matcher is configured, then no filtering should be done. 
- if len(opts.LabelMatchers) == 0 { - return metrics - } - if len(metrics) == 0 { - return metrics - } - - filtered := make([]*io_prometheus_client.Metric, 0, len(metrics)) - - for _, m := range metrics { - metricLabels := map[string]string{} - for _, lp := range m.GetLabel() { - metricLabels[lp.GetName()] = lp.GetValue() - } - - matches := true - for _, matcher := range opts.LabelMatchers { - if !matcher.Matches(metricLabels[matcher.Name]) { - matches = false - break - } - } - - if !matches { - continue - } - - filtered = append(filtered, m) - } - - return filtered -} - -func SumValues(values []float64) float64 { - sum := 0.0 - for _, v := range values { - sum += v - } - return sum -} - -func EqualsSingle(expected float64) func(float64) bool { - return func(v float64) bool { - return v == expected || (math.IsNaN(v) && math.IsNaN(expected)) - } -} - -// Equals is an isExpected function for WaitSumMetrics that returns true if given single sum is equals to given value. -func Equals(value float64) func(sums ...float64) bool { - return func(sums ...float64) bool { - if len(sums) != 1 { - panic("equals: expected one value") - } - return sums[0] == value || math.IsNaN(sums[0]) && math.IsNaN(value) - } -} - -// Greater is an isExpected function for WaitSumMetrics that returns true if given single sum is greater than given value. -func Greater(value float64) func(sums ...float64) bool { - return func(sums ...float64) bool { - if len(sums) != 1 { - panic("greater: expected one value") - } - return sums[0] > value - } -} - -// GreaterOrEqual is an isExpected function for WaitSumMetrics that returns true if given single sum is greater or equal than given value. -func GreaterOrEqual(value float64) func(sums ...float64) bool { - return func(sums ...float64) bool { - if len(sums) != 1 { - panic("greater: expected one value") - } - return sums[0] >= value - } -} - -// Less is an isExpected function for WaitSumMetrics that returns true if given single sum is less than given value. -func Less(value float64) func(sums ...float64) bool { - return func(sums ...float64) bool { - if len(sums) != 1 { - panic("less: expected one value") - } - return sums[0] < value - } -} - -// EqualsAmongTwo is an isExpected function for WaitSumMetrics that returns true if first sum is equal to the second. -// NOTE: Be careful on scrapes in between of process that changes two metrics. Those are -// usually not atomic. -func EqualsAmongTwo(sums ...float64) bool { - if len(sums) != 2 { - panic("equalsAmongTwo: expected two values") - } - return sums[0] == sums[1] -} - -// GreaterAmongTwo is an isExpected function for WaitSumMetrics that returns true if first sum is greater than second. -// NOTE: Be careful on scrapes in between of process that changes two metrics. Those are -// usually not atomic. -func GreaterAmongTwo(sums ...float64) bool { - if len(sums) != 2 { - panic("greaterAmongTwo: expected two values") - } - return sums[0] > sums[1] -} - -// LessAmongTwo is an isExpected function for WaitSumMetrics that returns true if first sum is smaller than second. -// NOTE: Be careful on scrapes in between of process that changes two metrics. Those are -// usually not atomic. 
-func LessAmongTwo(sums ...float64) bool { - if len(sums) != 2 { - panic("lessAmongTwo: expected two values") - } - return sums[0] < sums[1] -} diff --git a/vendor/github.com/grafana/e2e/metrics_options.go b/vendor/github.com/grafana/e2e/metrics_options.go deleted file mode 100644 index f52a1634..00000000 --- a/vendor/github.com/grafana/e2e/metrics_options.go +++ /dev/null @@ -1,58 +0,0 @@ -package e2e - -import ( - io_prometheus_client "github.com/prometheus/client_model/go" - "github.com/prometheus/prometheus/model/labels" -) - -var ( - DefaultMetricsOptions = MetricsOptions{ - GetValue: getMetricValue, - WaitMissingMetrics: false, - } -) - -// GetMetricValueFunc defined the signature of a function used to get the metric value. -type GetMetricValueFunc func(m *io_prometheus_client.Metric) float64 - -// MetricsOption defined the signature of a function used to manipulate options. -type MetricsOption func(*MetricsOptions) - -// MetricsOptions is the structure holding all options. -type MetricsOptions struct { - GetValue GetMetricValueFunc - LabelMatchers []*labels.Matcher - WaitMissingMetrics bool - SkipMissingMetrics bool -} - -// WithMetricCount is an option to get the histogram/summary count as metric value. -func WithMetricCount(opts *MetricsOptions) { - opts.GetValue = getMetricCount -} - -// WithLabelMatchers is an option to filter only matching series. -func WithLabelMatchers(matchers ...*labels.Matcher) MetricsOption { - return func(opts *MetricsOptions) { - opts.LabelMatchers = matchers - } -} - -// WithWaitMissingMetrics is an option to wait whenever an expected metric is missing. If this -// option is not enabled, will return error on missing metrics. -func WaitMissingMetrics(opts *MetricsOptions) { - opts.WaitMissingMetrics = true -} - -// SkipWaitMissingMetrics is an option to skip/ignore whenever an expected metric is missing. -func SkipMissingMetrics(opts *MetricsOptions) { - opts.SkipMissingMetrics = true -} - -func buildMetricsOptions(opts []MetricsOption) MetricsOptions { - result := DefaultMetricsOptions - for _, opt := range opts { - opt(&result) - } - return result -} diff --git a/vendor/github.com/grafana/e2e/scenario.go b/vendor/github.com/grafana/e2e/scenario.go deleted file mode 100644 index 9812b2a0..00000000 --- a/vendor/github.com/grafana/e2e/scenario.go +++ /dev/null @@ -1,270 +0,0 @@ -package e2e - -import ( - "fmt" - "os" - "strings" - "sync" - - "github.com/pkg/errors" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" -) - -const ( - ContainerSharedDir = "/shared" -) - -type Service interface { - Name() string - Start(networkName, dir string) error - WaitReady() error - - // It should be ok to Stop and Kill more than once, with next invokes being noop. - Kill() error - Stop() error -} - -type Scenario struct { - services []Service - - networkName string - sharedDir string -} - -func NewScenario(networkName string) (*Scenario, error) { - s := &Scenario{networkName: networkName} - - var err error - s.sharedDir, err = GetTempDirectory() - if err != nil { - return nil, err - } - - // Force a shutdown in order to cleanup from a spurious situation in case - // the previous tests run didn't cleanup correctly. - s.shutdown() - - args := []string{ - "network", "create", networkName, - } - - if extraArgs := os.Getenv("DOCKER_NETWORK_CREATE_EXTRA_ARGS"); extraArgs != "" { - args = append( - args, - strings.Split(extraArgs, ",")..., - ) - } - - // Setup the docker network. 
- if out, err := RunCommandAndGetOutput("docker", args...); err != nil { - logger.Log(string(out)) - s.clean() - return nil, errors.Wrapf(err, "create docker network '%s'", networkName) - } - - return s, nil -} - -// SharedDir returns the absolute path of the directory on the host that is shared with all services in docker. -func (s *Scenario) SharedDir() string { - return s.sharedDir -} - -// NetworkName returns the network name that scenario is responsible for. -func (s *Scenario) NetworkName() string { - return s.networkName -} - -func (s *Scenario) isRegistered(name string) bool { - for _, service := range s.services { - if service.Name() == name { - return true - } - } - return false -} - -func (s *Scenario) StartAndWaitReady(services ...Service) error { - if err := s.Start(services...); err != nil { - return err - } - return s.WaitReady(services...) -} - -func (s *Scenario) Start(services ...Service) error { - var ( - wg = sync.WaitGroup{} - startedMx = sync.Mutex{} - started = make([]Service, 0, len(services)) - errsMx = sync.Mutex{} - errs = tsdb_errors.NewMulti() - ) - - // Ensure provided services don't conflict with existing ones. - if err := s.assertNoConflicts(services...); err != nil { - return err - } - - // Start the services concurrently. - wg.Add(len(services)) - - for _, service := range services { - go func(service Service) { - defer wg.Done() - - logger.Log("Starting", service.Name()) - - // Start the service. - if err := service.Start(s.networkName, s.SharedDir()); err != nil { - errsMx.Lock() - errs.Add(err) - errsMx.Unlock() - return - } - - logger.Log("Started", service.Name()) - - startedMx.Lock() - started = append(started, service) - startedMx.Unlock() - }(service) - } - - // Wait until all services have been started. - wg.Wait() - - // Add the successfully started services to the scenario. - s.services = append(s.services, started...) - - return errs.Err() -} - -func (s *Scenario) Stop(services ...Service) error { - for _, service := range services { - if !s.isRegistered(service.Name()) { - return fmt.Errorf("unable to stop service %s because it does not exist", service.Name()) - } - if err := service.Stop(); err != nil { - return err - } - - // Remove the service from the list of services. - for i, entry := range s.services { - if entry.Name() == service.Name() { - s.services = append(s.services[:i], s.services[i+1:]...) - break - } - } - } - return nil -} - -func (s *Scenario) WaitReady(services ...Service) error { - for _, service := range services { - if !s.isRegistered(service.Name()) { - return fmt.Errorf("unable to wait for service %s because it does not exist", service.Name()) - } - if err := service.WaitReady(); err != nil { - return err - } - } - return nil -} - -func (s *Scenario) Close() { - if s == nil { - return - } - s.shutdown() - s.clean() -} - -func (s *Scenario) assertNoConflicts(services ...Service) error { - // Build a map of services already registered. - names := map[string]struct{}{} - for _, service := range s.services { - names[service.Name()] = struct{}{} - } - - // Check if input services conflict with already existing ones or between them. - for _, service := range services { - if _, exists := names[service.Name()]; exists { - return fmt.Errorf("another service with the same name '%s' exists", service.Name()) - } - - names[service.Name()] = struct{}{} - } - - return nil -} - -// TODO(bwplotka): Add comments. 
-func (s *Scenario) clean() { - if err := os.RemoveAll(s.sharedDir); err != nil { - logger.Log("error while removing sharedDir", s.sharedDir, "err:", err) - } -} - -func (s *Scenario) shutdown() { - // Kill the services concurrently. - wg := sync.WaitGroup{} - wg.Add(len(s.services)) - - for _, srv := range s.services { - go func(service Service) { - defer wg.Done() - - if err := service.Kill(); err != nil { - logger.Log("Unable to kill service", service.Name(), ":", err.Error()) - } - }(srv) - } - - // Wait until all services have been killed. - wg.Wait() - - // Ensure there are no leftover containers. - if out, err := RunCommandAndGetOutput( - "docker", - "ps", - "-a", - "--quiet", - "--filter", - fmt.Sprintf("network=%s", s.networkName), - ); err == nil { - for _, containerID := range strings.Split(string(out), "\n") { - containerID = strings.TrimSpace(containerID) - if containerID == "" { - continue - } - - if out, err = RunCommandAndGetOutput("docker", "rm", "--force", containerID); err != nil { - logger.Log(string(out)) - logger.Log("Unable to cleanup leftover container", containerID, ":", err.Error()) - } - } - } else { - logger.Log(string(out)) - logger.Log("Unable to cleanup leftover containers:", err.Error()) - } - - // Teardown the docker network. In case the network does not exists (ie. this function - // is called during the setup of the scenario) we skip the removal in order to not log - // an error which may be misleading. - if ok, err := existDockerNetwork(s.networkName); ok || err != nil { - if out, err := RunCommandAndGetOutput("docker", "network", "rm", s.networkName); err != nil { - logger.Log(string(out)) - logger.Log("Unable to remove docker network", s.networkName, ":", err.Error()) - } - } -} - -func existDockerNetwork(networkName string) (bool, error) { - out, err := RunCommandAndGetOutput("docker", "network", "ls", "--quiet", "--filter", fmt.Sprintf("name=%s", networkName)) - if err != nil { - logger.Log(string(out)) - logger.Log("Unable to check if docker network", networkName, "exists:", err.Error()) - } - - return strings.TrimSpace(string(out)) != "", nil -} diff --git a/vendor/github.com/grafana/e2e/service.go b/vendor/github.com/grafana/e2e/service.go deleted file mode 100644 index 047ecbcf..00000000 --- a/vendor/github.com/grafana/e2e/service.go +++ /dev/null @@ -1,748 +0,0 @@ -package e2e - -import ( - "bytes" - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "os/exec" - "regexp" - "strconv" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/grafana/dskit/backoff" - "github.com/grafana/dskit/runutil" - "github.com/pkg/errors" - "github.com/prometheus/common/expfmt" -) - -var ( - dockerIPv4PortPattern = regexp.MustCompile(`^\d+\.\d+\.\d+\.\d+:(\d+)$`) - errMissingMetric = errors.New("metric not found") -) - -// ConcreteService represents microservice with optional ports which will be discoverable from docker -// with :. For connecting from test, use `Endpoint` method. -// -// ConcreteService can be reused (started and stopped many time), but it can represent only one running container -// at the time. -type ConcreteService struct { - name string - image string - networkPorts []int - env map[string]string - user string - command *Command - cmd *exec.Cmd - readiness ReadinessProbe - privileged bool - - // Maps container ports to dynamically binded local ports. - networkPortsContainerToLocal map[int]int - - // Generic retry backoff. - retryBackoff *backoff.Backoff - - // docker NetworkName used to start this container. 
- // If empty it means service is stopped. - usedNetworkName string -} - -func NewConcreteService( - name string, - image string, - command *Command, - readiness ReadinessProbe, - networkPorts ...int, -) *ConcreteService { - return &ConcreteService{ - name: name, - image: image, - networkPorts: networkPorts, - command: command, - networkPortsContainerToLocal: map[int]int{}, - readiness: readiness, - retryBackoff: backoff.New(context.Background(), backoff.Config{ - MinBackoff: 300 * time.Millisecond, - MaxBackoff: 600 * time.Millisecond, - MaxRetries: 100, // Sometimes the CI is slow ¯\_(ツ)_/¯ - }), - } -} - -func (s *ConcreteService) isExpectedRunning() bool { - return s.usedNetworkName != "" -} - -func (s *ConcreteService) Name() string { return s.name } - -// Less often used options. - -func (s *ConcreteService) SetBackoff(cfg backoff.Config) { - s.retryBackoff = backoff.New(context.Background(), cfg) -} - -func (s *ConcreteService) SetEnvVars(env map[string]string) { - s.env = env -} - -func (s *ConcreteService) SetUser(user string) { - s.user = user -} - -func (s *ConcreteService) SetPrivileged(privileged bool) { - s.privileged = privileged -} - -func (s *ConcreteService) Start(networkName, sharedDir string) (err error) { - // In case of any error, if the container was already created, we - // have to cleanup removing it. We ignore the error of the "docker rm" - // because we don't know if the container was created or not. - defer func() { - if err != nil { - _, _ = RunCommandAndGetOutput("docker", "rm", "--force", s.name) - } - }() - - s.cmd = exec.Command("docker", s.buildDockerRunArgs(networkName, sharedDir)...) - s.cmd.Stdout = &LinePrefixLogger{prefix: s.name + ": ", logger: logger} - s.cmd.Stderr = &LinePrefixLogger{prefix: s.name + ": ", logger: logger} - if err = s.cmd.Start(); err != nil { - return err - } - s.usedNetworkName = networkName - - // Wait until the container has been started. - if err = s.WaitForRunning(); err != nil { - return err - } - - // Get the dynamic local ports mapped to the container. - for _, containerPort := range s.networkPorts { - var out []byte - - out, err = RunCommandAndGetOutput("docker", "port", s.containerName(), strconv.Itoa(containerPort)) - if err != nil { - // Catch init errors. 
- if werr := s.WaitForRunning(); werr != nil { - return errors.Wrapf(werr, "failed to get mapping for port as container %s exited: %v", s.containerName(), err) - } - return errors.Wrapf(err, "unable to get mapping for port %d; service: %s; output: %q", containerPort, s.name, out) - } - - localPort, err := parseDockerIPv4Port(string(out)) - if err != nil { - return errors.Wrapf(err, "unable to get mapping for port %d (output: %s); service: %s", containerPort, string(out), s.name) - } - - s.networkPortsContainerToLocal[containerPort] = localPort - } - - logger.Log("Ports for container:", s.containerName(), "Mapping:", s.networkPortsContainerToLocal) - return nil -} - -func (s *ConcreteService) Stop() error { - if !s.isExpectedRunning() { - return nil - } - - logger.Log("Stopping", s.name) - - if out, err := RunCommandAndGetOutput("docker", "stop", "--time=30", s.containerName()); err != nil { - logger.Log(string(out)) - return err - } - s.usedNetworkName = "" - - return s.cmd.Wait() -} - -func (s *ConcreteService) Kill() error { - if !s.isExpectedRunning() { - return nil - } - - logger.Log("Killing", s.name) - - if out, err := RunCommandAndGetOutput("docker", "kill", s.containerName()); err != nil { - logger.Log(string(out)) - return err - } - - // Wait until the container actually stopped. However, this could fail if - // the container already exited, so we just ignore the error. - _, _ = RunCommandAndGetOutput("docker", "wait", s.containerName()) - - s.usedNetworkName = "" - - logger.Log("Killed", s.name) - return nil -} - -// Endpoint returns external (from host perspective) service endpoint (host:port) for given internal port. -// External means that it will be accessible only from host, but not from docker containers. -// -// If your service is not running, this method returns incorrect `stopped` endpoint. -func (s *ConcreteService) Endpoint(port int) string { - if !s.isExpectedRunning() { - return "stopped" - } - - // Map the container port to the local port. - localPort, ok := s.networkPortsContainerToLocal[port] - if !ok { - return "" - } - - // Use an IPv4 address instead of "localhost" hostname because our port mapping assumes IPv4 - // (a port published by a Docker container could be different between IPv4 and IPv6). - return fmt.Sprintf("127.0.0.1:%d", localPort) -} - -// NetworkEndpoint returns internal service endpoint (host:port) for given internal port. -// Internal means that it will be accessible only from docker containers within the network that this -// service is running in. If you configure your local resolver with docker DNS namespace you can access it from host -// as well. Use `Endpoint` for host access. -// -// If your service is not running, use `NetworkEndpointFor` instead. -func (s *ConcreteService) NetworkEndpoint(port int) string { - if s.usedNetworkName == "" { - return "stopped" - } - return s.NetworkEndpointFor(s.usedNetworkName, port) -} - -// NetworkEndpointFor returns internal service endpoint (host:port) for given internal port and network. -// Internal means that it will be accessible only from docker containers within the given network. If you configure -// your local resolver with docker DNS namespace you can access it from host as well. -// -// This method return correct endpoint for the service in any state. 
-func (s *ConcreteService) NetworkEndpointFor(networkName string, port int) string { - return fmt.Sprintf("%s:%d", NetworkContainerHost(networkName, s.name), port) -} - -func (s *ConcreteService) SetReadinessProbe(probe ReadinessProbe) { - s.readiness = probe -} - -func (s *ConcreteService) Ready() error { - if !s.isExpectedRunning() { - return fmt.Errorf("service %s is stopped", s.Name()) - } - - // Ensure the service has a readiness probe configure. - if s.readiness == nil { - return nil - } - - return s.readiness.Ready(s) -} - -func (s *ConcreteService) containerName() string { - return NetworkContainerHost(s.usedNetworkName, s.name) -} - -func (s *ConcreteService) WaitForRunning() (err error) { - if !s.isExpectedRunning() { - return fmt.Errorf("service %s is stopped", s.Name()) - } - - for s.retryBackoff.Reset(); s.retryBackoff.Ongoing(); { - // Enforce a timeout on the command execution because we've seen some flaky tests - // stuck here. - - var out []byte - out, err = RunCommandWithTimeoutAndGetOutput(5*time.Second, "docker", "inspect", "--format={{json .State.Running}}", s.containerName()) - if err != nil { - s.retryBackoff.Wait() - continue - } - - if out == nil { - err = fmt.Errorf("nil output") - s.retryBackoff.Wait() - continue - } - - str := strings.TrimSpace(string(out)) - if str != "true" { - err = fmt.Errorf("unexpected output: %q", str) - s.retryBackoff.Wait() - continue - } - - return nil - } - - return fmt.Errorf("docker container %s failed to start: %v", s.name, err) -} - -func (s *ConcreteService) WaitReady() (err error) { - if !s.isExpectedRunning() { - return fmt.Errorf("service %s is stopped", s.Name()) - } - - for s.retryBackoff.Reset(); s.retryBackoff.Ongoing(); { - err = s.Ready() - if err == nil { - return nil - } - - s.retryBackoff.Wait() - } - - return fmt.Errorf("the service %s is not ready; err: %v", s.name, err) -} - -func (s *ConcreteService) buildDockerRunArgs(networkName, sharedDir string) []string { - args := []string{"run", "--rm", "--net=" + networkName, "--name=" + networkName + "-" + s.name, "--hostname=" + s.name} - - // If running a dind container, this needs to be privileged. - if s.privileged { - args = append(args, "--privileged") - } - - // For Drone CI users, expire the container after 6 hours using drone-gc - args = append(args, "--label", fmt.Sprintf("io.drone.expires=%d", time.Now().Add(6*time.Hour).Unix())) - - // Mount the shared/ directory into the container - args = append(args, "-v", fmt.Sprintf("%s:%s:z", sharedDir, ContainerSharedDir)) - - // Environment variables - for name, value := range s.env { - args = append(args, "-e", name+"="+value) - } - - if s.user != "" { - args = append(args, "--user", s.user) - } - - // Published ports - for _, port := range s.networkPorts { - args = append(args, "-p", strconv.Itoa(port)) - } - - // Disable entrypoint if required - if s.command != nil && s.command.entrypointDisabled { - args = append(args, "--entrypoint", "") - } - - args = append(args, s.image) - - if s.command != nil { - args = append(args, s.command.cmd) - args = append(args, s.command.args...) - } - - return args -} - -// Exec runs the provided against a the docker container specified by this -// service. It returns the stdout, stderr, and error response from attempting -// to run the command. -func (s *ConcreteService) Exec(command *Command) (string, string, error) { - args := []string{"exec", s.containerName()} - args = append(args, command.cmd) - args = append(args, command.args...) - - cmd := exec.Command("docker", args...) 
- var stdout bytes.Buffer - cmd.Stdout = &stdout - - var stderr bytes.Buffer - cmd.Stderr = &stderr - - err := cmd.Run() - - return stdout.String(), stderr.String(), err -} - -// NetworkContainerHost return the hostname of the container within the network. This is -// the address a container should use to connect to other containers. -func NetworkContainerHost(networkName, containerName string) string { - return fmt.Sprintf("%s-%s", networkName, containerName) -} - -// NetworkContainerHostPort return the host:port address of a container within the network. -func NetworkContainerHostPort(networkName, containerName string, port int) string { - return fmt.Sprintf("%s-%s:%d", networkName, containerName, port) -} - -type Command struct { - cmd string - args []string - entrypointDisabled bool -} - -func NewCommand(cmd string, args ...string) *Command { - return &Command{ - cmd: cmd, - args: args, - } -} - -func NewCommandWithoutEntrypoint(cmd string, args ...string) *Command { - return &Command{ - cmd: cmd, - args: args, - entrypointDisabled: true, - } -} - -type ReadinessProbe interface { - Ready(service *ConcreteService) (err error) -} - -// HTTPReadinessProbe checks readiness by making HTTP(S) call and checking for expected response status code. -type HTTPReadinessProbe struct { - schema string - port int - path string - expectedStatusRangeStart int - expectedStatusRangeEnd int - expectedContent []string - - // The TLS config to use when issuing the HTTPS request. - clientTLSConfig *tls.Config -} - -func NewHTTPReadinessProbe(port int, path string, expectedStatusRangeStart, expectedStatusRangeEnd int, expectedContent ...string) *HTTPReadinessProbe { - return &HTTPReadinessProbe{ - schema: "http", - port: port, - path: path, - expectedStatusRangeStart: expectedStatusRangeStart, - expectedStatusRangeEnd: expectedStatusRangeEnd, - expectedContent: expectedContent, - } -} - -func NewHTTPSReadinessProbe(port int, path, serverName, clientKeyFile, clientCertFile, rootCertFile string, expectedStatusRangeStart, expectedStatusRangeEnd int, expectedContent ...string) (*HTTPReadinessProbe, error) { - // Load client certificate and private key. 
- cert, err := tls.LoadX509KeyPair(clientCertFile, clientKeyFile) - if err != nil { - return nil, errors.Wrapf(err, "error creating x509 keypair from client cert file %s and client key file %s", clientCertFile, clientKeyFile) - } - - caCert, err := ioutil.ReadFile(rootCertFile) - if err != nil { - return nil, errors.Wrapf(err, "error opening root CA cert file %s", rootCertFile) - } - - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - - return &HTTPReadinessProbe{ - schema: "https", - port: port, - path: path, - expectedStatusRangeStart: expectedStatusRangeStart, - expectedStatusRangeEnd: expectedStatusRangeEnd, - expectedContent: expectedContent, - clientTLSConfig: &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caCertPool, - ServerName: serverName, - }, - }, nil -} - -func (p *HTTPReadinessProbe) Ready(service *ConcreteService) (err error) { - endpoint := service.Endpoint(p.port) - if endpoint == "" { - return fmt.Errorf("cannot get service endpoint for port %d", p.port) - } else if endpoint == "stopped" { - return errors.New("service has stopped") - } - - res, err := DoGetTLS(p.schema+"://"+endpoint+p.path, p.clientTLSConfig) - if err != nil { - return err - } - - defer runutil.ExhaustCloseWithErrCapture(&err, res.Body, "response readiness") - body, _ := ioutil.ReadAll(res.Body) - - if res.StatusCode < p.expectedStatusRangeStart || res.StatusCode > p.expectedStatusRangeEnd { - return fmt.Errorf("expected code in range: [%v, %v], got status code: %v and body: %v", p.expectedStatusRangeStart, p.expectedStatusRangeEnd, res.StatusCode, string(body)) - } - - for _, expected := range p.expectedContent { - if !strings.Contains(string(body), expected) { - return fmt.Errorf("expected body containing %s, got: %v", expected, string(body)) - } - } - - return nil -} - -// TCPReadinessProbe checks readiness by ensure a TCP connection can be established. -type TCPReadinessProbe struct { - port int -} - -func NewTCPReadinessProbe(port int) *TCPReadinessProbe { - return &TCPReadinessProbe{ - port: port, - } -} - -func (p *TCPReadinessProbe) Ready(service *ConcreteService) (err error) { - endpoint := service.Endpoint(p.port) - if endpoint == "" { - return fmt.Errorf("cannot get service endpoint for port %d", p.port) - } else if endpoint == "stopped" { - return errors.New("service has stopped") - } - - conn, err := net.DialTimeout("tcp", endpoint, time.Second) - if err != nil { - return err - } - - return conn.Close() -} - -// CmdReadinessProbe checks readiness by `Exec`ing a command (within container) which returns 0 to consider status being ready -type CmdReadinessProbe struct { - cmd *Command -} - -func NewCmdReadinessProbe(cmd *Command) *CmdReadinessProbe { - return &CmdReadinessProbe{cmd: cmd} -} - -func (p *CmdReadinessProbe) Ready(service *ConcreteService) error { - _, _, err := service.Exec(p.cmd) - return err -} - -type LinePrefixLogger struct { - prefix string - logger log.Logger -} - -func (w *LinePrefixLogger) Write(p []byte) (n int, err error) { - for _, line := range strings.Split(string(p), "\n") { - // Skip empty lines - line = strings.TrimSpace(line) - if line == "" { - continue - } - - // Write the prefix + line to the wrapped writer - if err := w.logger.Log(w.prefix + line); err != nil { - return 0, err - } - } - - return len(p), nil -} - -// HTTPService represents opinionated microservice with at least HTTP port that as mandatory requirement, -// serves metrics. 
-type HTTPService struct { - *ConcreteService - - metricsTimeout time.Duration - httpPort int -} - -func NewHTTPService( - name string, - image string, - command *Command, - readiness ReadinessProbe, - httpPort int, - otherPorts ...int, -) *HTTPService { - return &HTTPService{ - ConcreteService: NewConcreteService(name, image, command, readiness, append(otherPorts, httpPort)...), - metricsTimeout: time.Second, - httpPort: httpPort, - } -} - -func (s *HTTPService) SetMetricsTimeout(timeout time.Duration) { - s.metricsTimeout = timeout -} - -func (s *HTTPService) Metrics() (_ string, err error) { - // Map the container port to the local port - localPort := s.networkPortsContainerToLocal[s.httpPort] - - // Fetch metrics. - // Use an IPv4 address instead of "localhost" hostname because our port mapping assumes IPv4 - // (a port published by a Docker container could be different between IPv4 and IPv6). - res, err := DoGetWithTimeout(fmt.Sprintf("http://127.0.0.1:%d/metrics", localPort), s.metricsTimeout) - if err != nil { - return "", err - } - - // Check the status code. - if res.StatusCode < 200 || res.StatusCode >= 300 { - return "", fmt.Errorf("unexpected status code %d while fetching metrics", res.StatusCode) - } - - defer runutil.ExhaustCloseWithErrCapture(&err, res.Body, "metrics response") - body, err := ioutil.ReadAll(res.Body) - - return string(body), err -} - -func (s *HTTPService) HTTPPort() int { - return s.httpPort -} - -func (s *HTTPService) HTTPEndpoint() string { - return s.Endpoint(s.httpPort) -} - -func (s *HTTPService) NetworkHTTPEndpoint() string { - return s.NetworkEndpoint(s.httpPort) -} - -func (s *HTTPService) NetworkHTTPEndpointFor(networkName string) string { - return s.NetworkEndpointFor(networkName, s.httpPort) -} - -// WaitSumMetrics waits for at least one instance of each given metric names to be present and their sums, returning true -// when passed to given isExpected(...). -func (s *HTTPService) WaitSumMetrics(isExpected func(sums ...float64) bool, metricNames ...string) error { - return s.WaitSumMetricsWithOptions(isExpected, metricNames) -} - -func (s *HTTPService) WaitSumMetricsWithOptions(isExpected func(sums ...float64) bool, metricNames []string, opts ...MetricsOption) error { - var ( - sums []float64 - err error - options = buildMetricsOptions(opts) - ) - - for s.retryBackoff.Reset(); s.retryBackoff.Ongoing(); { - sums, err = s.SumMetrics(metricNames, opts...) - if options.WaitMissingMetrics && errors.Is(err, errMissingMetric) { - continue - } - if err != nil { - return err - } - - if isExpected(sums...) { - return nil - } - - s.retryBackoff.Wait() - } - - return fmt.Errorf("unable to find metrics %s with expected values. Last error: %v. Last values: %v", metricNames, err, sums) -} - -// SumMetrics returns the sum of the values of each given metric names. -func (s *HTTPService) SumMetrics(metricNames []string, opts ...MetricsOption) ([]float64, error) { - options := buildMetricsOptions(opts) - sums := make([]float64, len(metricNames)) - - metrics, err := s.Metrics() - if err != nil { - return nil, err - } - - var tp expfmt.TextParser - families, err := tp.TextToMetricFamilies(strings.NewReader(metrics)) - if err != nil { - return nil, err - } - - for i, m := range metricNames { - sums[i] = 0.0 - - // Get the metric family. - mf, ok := families[m] - if !ok { - if options.SkipMissingMetrics { - continue - } - - return nil, errors.Wrapf(errMissingMetric, "metric=%s service=%s", m, s.name) - } - - // Filter metrics. 
- metrics := filterMetrics(mf.GetMetric(), options) - if len(metrics) == 0 { - if options.SkipMissingMetrics { - continue - } - - return nil, errors.Wrapf(errMissingMetric, "metric=%s service=%s", m, s.name) - } - - sums[i] = SumValues(getValues(metrics, options)) - } - - return sums, nil -} - -// WaitRemovedMetric waits until a metric disappear from the list of metrics exported by the service. -func (s *HTTPService) WaitRemovedMetric(metricName string, opts ...MetricsOption) error { - options := buildMetricsOptions(opts) - - for s.retryBackoff.Reset(); s.retryBackoff.Ongoing(); { - // Fetch metrics. - metrics, err := s.Metrics() - if err != nil { - return err - } - - // Parse metrics. - var tp expfmt.TextParser - families, err := tp.TextToMetricFamilies(strings.NewReader(metrics)) - if err != nil { - return err - } - - // Get the metric family. - mf, ok := families[metricName] - if !ok { - return nil - } - - // Filter metrics. - if len(filterMetrics(mf.GetMetric(), options)) == 0 { - return nil - } - - s.retryBackoff.Wait() - } - - return fmt.Errorf("the metric %s is still exported by %s", metricName, s.name) -} - -// parseDockerIPv4Port parses the input string which is expected to be the output of "docker port" -// command and returns the first IPv4 port found. -func parseDockerIPv4Port(out string) (int, error) { - // The "docker port" output may be multiple lines if both IPv4 and IPv6 are supported, - // so we need to parse each line. - for _, line := range strings.Split(out, "\n") { - matches := dockerIPv4PortPattern.FindStringSubmatch(strings.TrimSpace(line)) - if len(matches) != 2 { - continue - } - - port, err := strconv.Atoi(matches[1]) - if err != nil { - continue - } - - return port, nil - } - - // We've not been able to parse the output format. - return 0, errors.New("unknown output format") -} diff --git a/vendor/github.com/grafana/e2e/util.go b/vendor/github.com/grafana/e2e/util.go deleted file mode 100644 index 47aa218c..00000000 --- a/vendor/github.com/grafana/e2e/util.go +++ /dev/null @@ -1,264 +0,0 @@ -package e2e - -import ( - "context" - "crypto/tls" - "io" - "io/ioutil" - "math" - "math/rand" - "net/http" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/prompb" -) - -func RunCommandAndGetOutput(name string, args ...string) ([]byte, error) { - cmd := exec.Command(name, args...) - return cmd.CombinedOutput() -} - -func RunCommandWithTimeoutAndGetOutput(timeout time.Duration, name string, args ...string) ([]byte, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - cmd := exec.CommandContext(ctx, name, args...) - return cmd.CombinedOutput() -} - -func EmptyFlags() map[string]string { - return map[string]string{} -} - -func MergeFlags(inputs ...map[string]string) map[string]string { - output := MergeFlagsWithoutRemovingEmpty(inputs...) 
- - for k, v := range output { - if v == "" { - delete(output, k) - } - } - - return output -} - -func MergeFlagsWithoutRemovingEmpty(inputs ...map[string]string) map[string]string { - output := map[string]string{} - - for _, input := range inputs { - for name, value := range input { - output[name] = value - } - } - - return output -} - -func BuildArgs(flags map[string]string) []string { - args := make([]string, 0, len(flags)) - - for name, value := range flags { - if value != "" { - args = append(args, name+"="+value) - } else { - args = append(args, name) - } - } - - return args -} - -// DoGet performs an HTTP GET request towards the supplied URL and using a -// timeout of 1 second. -func DoGet(url string) (*http.Response, error) { - return doRequestWithTimeout("GET", url, nil, nil, time.Second) -} - -// DoGetWithTimeout performs an HTTP GET request towards the supplied URL and using a -// specified timeout. -func DoGetWithTimeout(url string, timeout time.Duration) (*http.Response, error) { - return doRequestWithTimeout("GET", url, nil, nil, timeout) -} - -// DoGetTLS is like DoGet but allows to configure a TLS config. -func DoGetTLS(url string, tlsConfig *tls.Config) (*http.Response, error) { - return doRequestWithTimeout("GET", url, nil, tlsConfig, time.Second) -} - -// DoPost performs a HTTP POST request towards the supplied URL with an empty -// body and using a timeout of 1 second. -func DoPost(url string) (*http.Response, error) { - return doRequestWithTimeout("POST", url, strings.NewReader(""), nil, time.Second) -} - -func doRequestWithTimeout(method, url string, body io.Reader, tlsConfig *tls.Config, timeout time.Duration) (*http.Response, error) { - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - - client := &http.Client{ - Timeout: timeout, - Transport: &http.Transport{ - TLSClientConfig: tlsConfig, - }, - } - - return client.Do(req) -} - -// TimeToMilliseconds returns the input time as milliseconds, using the same -// formula used by Prometheus in order to get the same timestamp when asserting -// on query results. The formula we're mimicking here is Prometheus parseTime(). -// See: https://github.com/prometheus/prometheus/blob/df80dc4d3970121f2f76cba79050983ffb3cdbb0/web/api/v1/api.go#L1690-L1694 -func TimeToMilliseconds(t time.Time) int64 { - // Convert to seconds. - sec := float64(t.Unix()) + float64(t.Nanosecond())/1e9 - - // Parse seconds. - s, ns := math.Modf(sec) - - // Round nanoseconds part. - ns = math.Round(ns*1000) / 1000 - - // Convert to millis. 
- return (int64(s) * 1e3) + (int64(ns * 1e3)) -} - -func GenerateSeries(name string, ts time.Time, additionalLabels ...prompb.Label) (series []prompb.TimeSeries, vector model.Vector, matrix model.Matrix) { - tsMillis := TimeToMilliseconds(ts) - value := rand.Float64() - - lbls := append( - []prompb.Label{ - {Name: labels.MetricName, Value: name}, - }, - additionalLabels..., - ) - - // Generate the series - series = append(series, prompb.TimeSeries{ - Labels: lbls, - Exemplars: []prompb.Exemplar{ - {Value: value, Timestamp: tsMillis, Labels: []prompb.Label{ - {Name: "trace_id", Value: "1234"}, - }}, - }, - Samples: []prompb.Sample{ - {Value: value, Timestamp: tsMillis}, - }, - }) - - // Generate the expected vector and matrix when querying it - metric := model.Metric{} - metric[labels.MetricName] = model.LabelValue(name) - for _, lbl := range additionalLabels { - metric[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value) - } - - vector = append(vector, &model.Sample{ - Metric: metric, - Value: model.SampleValue(value), - Timestamp: model.Time(tsMillis), - }) - - matrix = append(matrix, &model.SampleStream{ - Metric: metric, - Values: []model.SamplePair{ - { - Timestamp: model.Time(tsMillis), - Value: model.SampleValue(value), - }, - }, - }) - - return -} - -func GenerateNSeries(nSeries, nExemplars int, name func() string, ts time.Time, additionalLabels func() []prompb.Label) (series []prompb.TimeSeries, vector model.Vector) { - tsMillis := TimeToMilliseconds(ts) - - // Generate the series - for i := 0; i < nSeries; i++ { - lbls := []prompb.Label{ - {Name: labels.MetricName, Value: name()}, - } - if additionalLabels != nil { - lbls = append(lbls, additionalLabels()...) - } - - value := rand.Float64() - - exemplars := []prompb.Exemplar{} - if i < nExemplars { - exemplars = []prompb.Exemplar{ - {Value: value, Timestamp: tsMillis, Labels: []prompb.Label{{Name: "trace_id", Value: "1234"}}}, - } - } - - series = append(series, prompb.TimeSeries{ - Labels: lbls, - Samples: []prompb.Sample{ - {Value: value, Timestamp: tsMillis}, - }, - Exemplars: exemplars, - }) - } - - // Generate the expected vector when querying it - for i := 0; i < nSeries; i++ { - metric := model.Metric{} - for _, lbl := range series[i].Labels { - metric[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value) - } - - vector = append(vector, &model.Sample{ - Metric: metric, - Value: model.SampleValue(series[i].Samples[0].Value), - Timestamp: model.Time(tsMillis), - }) - } - return -} - -// GetTempDirectory creates a temporary directory for shared integration -// test files, either in the working directory or a directory referenced by -// the E2E_TEMP_DIR environment variable -func GetTempDirectory() (string, error) { - var ( - dir string - err error - ) - // If a temp dir is referenced, return that - if os.Getenv("E2E_TEMP_DIR") != "" { - dir = os.Getenv("E2E_TEMP_DIR") - } else { - dir, err = os.Getwd() - if err != nil { - return "", err - } - } - - tmpDir, err := ioutil.TempDir(dir, "e2e_integration_test") - if err != nil { - return "", err - } - // Allow use of the temporary directory for testing with non-root - // users. 
- if err := os.Chmod(tmpDir, 0777); err != nil { - return "", err - } - absDir, err := filepath.Abs(tmpDir) - if err != nil { - _ = os.RemoveAll(tmpDir) - return "", err - } - - return absDir, nil -} diff --git a/vendor/github.com/grafana/regexp/.gitignore b/vendor/github.com/grafana/regexp/.gitignore deleted file mode 100644 index 66fd13c9..00000000 --- a/vendor/github.com/grafana/regexp/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -# vendor/ diff --git a/vendor/github.com/grafana/regexp/LICENSE b/vendor/github.com/grafana/regexp/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/github.com/grafana/regexp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grafana/regexp/README.md b/vendor/github.com/grafana/regexp/README.md deleted file mode 100644 index 756e60dc..00000000 --- a/vendor/github.com/grafana/regexp/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# Grafana Go regexp package -This repo is a fork of the upstream Go `regexp` package, with some code optimisations to make it run faster. - -All the optimisations have been submitted upstream, but not yet merged. - -All semantics are the same, and the optimised code passes all tests from upstream. - -The `main` branch is non-optimised: switch over to [`speedup`](https://github.com/grafana/regexp/tree/speedup) branch for the improved code. - -## Benchmarks: - -![image](https://user-images.githubusercontent.com/8125524/152182951-856549ed-6044-4285-b799-69b31f598e32.png) diff --git a/vendor/github.com/grafana/regexp/backtrack.go b/vendor/github.com/grafana/regexp/backtrack.go deleted file mode 100644 index 7c37c66a..00000000 --- a/vendor/github.com/grafana/regexp/backtrack.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// backtrack is a regular expression search with submatch -// tracking for small regular expressions and texts. It allocates -// a bit vector with (length of input) * (length of prog) bits, -// to make sure it never explores the same (character position, instruction) -// state multiple times. This limits the search to run in time linear in -// the length of the test. -// -// backtrack is a fast replacement for the NFA code on small -// regexps when onepass cannot be used. - -package regexp - -import ( - "regexp/syntax" - "sync" -) - -// A job is an entry on the backtracker's job stack. It holds -// the instruction pc and the position in the input. -type job struct { - pc uint32 - arg bool - pos int -} - -const ( - visitedBits = 32 - maxBacktrackProg = 500 // len(prog.Inst) <= max - maxBacktrackVector = 256 * 1024 // bit vector size <= max (bits) -) - -// bitState holds state for the backtracker. -type bitState struct { - end int - cap []int - matchcap []int - jobs []job - visited []uint32 - - inputs inputs -} - -var bitStatePool sync.Pool - -func newBitState() *bitState { - b, ok := bitStatePool.Get().(*bitState) - if !ok { - b = new(bitState) - } - return b -} - -func freeBitState(b *bitState) { - b.inputs.clear() - bitStatePool.Put(b) -} - -// maxBitStateLen returns the maximum length of a string to search with -// the backtracker using prog. -func maxBitStateLen(prog *syntax.Prog) int { - if !shouldBacktrack(prog) { - return 0 - } - return maxBacktrackVector / len(prog.Inst) -} - -// shouldBacktrack reports whether the program is too -// long for the backtracker to run. -func shouldBacktrack(prog *syntax.Prog) bool { - return len(prog.Inst) <= maxBacktrackProg -} - -// reset resets the state of the backtracker. -// end is the end position in the input. -// ncap is the number of captures. -func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) { - b.end = end - - if cap(b.jobs) == 0 { - b.jobs = make([]job, 0, 256) - } else { - b.jobs = b.jobs[:0] - } - - visitedSize := (len(prog.Inst)*(end+1) + visitedBits - 1) / visitedBits - if cap(b.visited) < visitedSize { - b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits) - } else { - b.visited = b.visited[:visitedSize] - clear(b.visited) // set to 0 - } - - if cap(b.cap) < ncap { - b.cap = make([]int, ncap) - } else { - b.cap = b.cap[:ncap] - } - for i := range b.cap { - b.cap[i] = -1 - } - - if cap(b.matchcap) < ncap { - b.matchcap = make([]int, ncap) - } else { - b.matchcap = b.matchcap[:ncap] - } - for i := range b.matchcap { - b.matchcap[i] = -1 - } -} - -// shouldVisit reports whether the combination of (pc, pos) has not -// been visited yet. -func (b *bitState) shouldVisit(pc uint32, pos int) bool { - n := uint(int(pc)*(b.end+1) + pos) - if b.visited[n/visitedBits]&(1<<(n&(visitedBits-1))) != 0 { - return false - } - b.visited[n/visitedBits] |= 1 << (n & (visitedBits - 1)) - return true -} - -// push pushes (pc, pos, arg) onto the job stack if it should be -// visited. -func (b *bitState) push(re *Regexp, pc uint32, pos int, arg bool) { - // Only check shouldVisit when arg is false. - // When arg is true, we are continuing a previous visit. - if re.prog.Inst[pc].Op != syntax.InstFail && (arg || b.shouldVisit(pc, pos)) { - b.jobs = append(b.jobs, job{pc: pc, arg: arg, pos: pos}) - } -} - -// tryBacktrack runs a backtracking search starting at pos. 
-func (re *Regexp) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { - longest := re.longest - - b.push(re, pc, pos, false) - for len(b.jobs) > 0 { - l := len(b.jobs) - 1 - // Pop job off the stack. - pc := b.jobs[l].pc - pos := b.jobs[l].pos - arg := b.jobs[l].arg - b.jobs = b.jobs[:l] - - // Optimization: rather than push and pop, - // code that is going to Push and continue - // the loop simply updates ip, p, and arg - // and jumps to CheckAndLoop. We have to - // do the ShouldVisit check that Push - // would have, but we avoid the stack - // manipulation. - goto Skip - CheckAndLoop: - if !b.shouldVisit(pc, pos) { - continue - } - Skip: - - inst := &re.prog.Inst[pc] - - switch inst.Op { - default: - panic("bad inst") - case syntax.InstFail: - panic("unexpected InstFail") - case syntax.InstAlt: - // Cannot just - // b.push(inst.Out, pos, false) - // b.push(inst.Arg, pos, false) - // If during the processing of inst.Out, we encounter - // inst.Arg via another path, we want to process it then. - // Pushing it here will inhibit that. Instead, re-push - // inst with arg==true as a reminder to push inst.Arg out - // later. - if arg { - // Finished inst.Out; try inst.Arg. - arg = false - pc = inst.Arg - goto CheckAndLoop - } else { - b.push(re, pc, pos, true) - pc = inst.Out - goto CheckAndLoop - } - - case syntax.InstAltMatch: - // One opcode consumes runes; the other leads to match. - switch re.prog.Inst[inst.Out].Op { - case syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: - // inst.Arg is the match. - b.push(re, inst.Arg, pos, false) - pc = inst.Arg - pos = b.end - goto CheckAndLoop - } - // inst.Out is the match - non-greedy - b.push(re, inst.Out, b.end, false) - pc = inst.Out - goto CheckAndLoop - - case syntax.InstRune: - r, width := i.step(pos) - if !inst.MatchRune(r) { - continue - } - pos += width - pc = inst.Out - goto CheckAndLoop - - case syntax.InstRune1: - r, width := i.step(pos) - if r != inst.Rune[0] { - continue - } - pos += width - pc = inst.Out - goto CheckAndLoop - - case syntax.InstRuneAnyNotNL: - r, width := i.step(pos) - if r == '\n' || r == endOfText { - continue - } - pos += width - pc = inst.Out - goto CheckAndLoop - - case syntax.InstRuneAny: - r, width := i.step(pos) - if r == endOfText { - continue - } - pos += width - pc = inst.Out - goto CheckAndLoop - - case syntax.InstCapture: - if arg { - // Finished inst.Out; restore the old value. - b.cap[inst.Arg] = pos - continue - } else { - if inst.Arg < uint32(len(b.cap)) { - // Capture pos to register, but save old value. - b.push(re, pc, b.cap[inst.Arg], true) // come back when we're done. - b.cap[inst.Arg] = pos - } - pc = inst.Out - goto CheckAndLoop - } - - case syntax.InstEmptyWidth: - flag := i.context(pos) - if !flag.match(syntax.EmptyOp(inst.Arg)) { - continue - } - pc = inst.Out - goto CheckAndLoop - - case syntax.InstNop: - pc = inst.Out - goto CheckAndLoop - - case syntax.InstMatch: - // We found a match. If the caller doesn't care - // where the match is, no point going further. - if len(b.cap) == 0 { - return true - } - - // Record best match so far. - // Only need to check end point, because this entire - // call is only considering one start position. - if len(b.cap) > 1 { - b.cap[1] = pos - } - if old := b.matchcap[1]; old == -1 || (longest && pos > 0 && pos > old) { - copy(b.matchcap, b.cap) - } - - // If going for first match, we're done. - if !longest { - return true - } - - // If we used the entire text, no longer match is possible. 
- if pos == b.end { - return true - } - - // Otherwise, continue on in hope of a longer match. - continue - } - } - - return longest && len(b.matchcap) > 1 && b.matchcap[1] >= 0 -} - -// backtrack runs a backtracking search of prog on the input starting at pos. -func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []int) []int { - startCond := re.cond - if startCond == ^syntax.EmptyOp(0) { // impossible - return nil - } - if startCond&syntax.EmptyBeginText != 0 && pos != 0 { - // Anchored match, past beginning of text. - return nil - } - - b := newBitState() - i, end := b.inputs.init(nil, ib, is) - b.reset(re.prog, end, ncap) - - // Anchored search must start at the beginning of the input - if startCond&syntax.EmptyBeginText != 0 { - if len(b.cap) > 0 { - b.cap[0] = pos - } - if !re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { - freeBitState(b) - return nil - } - } else { - - // Unanchored search, starting from each possible text position. - // Notice that we have to try the empty string at the end of - // the text, so the loop condition is pos <= end, not pos < end. - // This looks like it's quadratic in the size of the text, - // but we are not clearing visited between calls to TrySearch, - // so no work is duplicated and it ends up still being linear. - width := -1 - for ; pos <= end && width != 0; pos += width { - if len(re.prefix) > 0 { - // Match requires literal prefix; fast search for it. - advance := i.index(re, pos) - if advance < 0 { - freeBitState(b) - return nil - } - pos += advance - } - - if len(b.cap) > 0 { - b.cap[0] = pos - } - if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { - // Match must be leftmost; done. - goto Match - } - _, width = i.step(pos) - } - freeBitState(b) - return nil - } - -Match: - dstCap = append(dstCap, b.matchcap...) - freeBitState(b) - return dstCap -} diff --git a/vendor/github.com/grafana/regexp/exec.go b/vendor/github.com/grafana/regexp/exec.go deleted file mode 100644 index 3fc4b684..00000000 --- a/vendor/github.com/grafana/regexp/exec.go +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package regexp - -import ( - "io" - "regexp/syntax" - "sync" -) - -// A queue is a 'sparse array' holding pending threads of execution. -// See https://research.swtch.com/2008/03/using-uninitialized-memory-for-fun-and.html -type queue struct { - sparse []uint32 - dense []entry -} - -// An entry is an entry on a queue. -// It holds both the instruction pc and the actual thread. -// Some queue entries are just place holders so that the machine -// knows it has considered that pc. Such entries have t == nil. -type entry struct { - pc uint32 - t *thread -} - -// A thread is the state of a single path through the machine: -// an instruction and a corresponding capture array. -// See https://swtch.com/~rsc/regexp/regexp2.html -type thread struct { - inst *syntax.Inst - cap []int -} - -// A machine holds all the state during an NFA simulation for p. 
-type machine struct { - re *Regexp // corresponding Regexp - p *syntax.Prog // compiled program - q0, q1 queue // two queues for runq, nextq - pool []*thread // pool of available threads - matched bool // whether a match was found - matchcap []int // capture information for the match - - inputs inputs -} - -type inputs struct { - // cached inputs, to avoid allocation - bytes inputBytes - string inputString - reader inputReader -} - -func (i *inputs) newBytes(b []byte) input { - i.bytes.str = b - return &i.bytes -} - -func (i *inputs) newString(s string) input { - i.string.str = s - return &i.string -} - -func (i *inputs) newReader(r io.RuneReader) input { - i.reader.r = r - i.reader.atEOT = false - i.reader.pos = 0 - return &i.reader -} - -func (i *inputs) clear() { - // We need to clear 1 of these. - // Avoid the expense of clearing the others (pointer write barrier). - if i.bytes.str != nil { - i.bytes.str = nil - } else if i.reader.r != nil { - i.reader.r = nil - } else { - i.string.str = "" - } -} - -func (i *inputs) init(r io.RuneReader, b []byte, s string) (input, int) { - if r != nil { - return i.newReader(r), 0 - } - if b != nil { - return i.newBytes(b), len(b) - } - return i.newString(s), len(s) -} - -func (m *machine) init(ncap int) { - for _, t := range m.pool { - t.cap = t.cap[:ncap] - } - m.matchcap = m.matchcap[:ncap] -} - -// alloc allocates a new thread with the given instruction. -// It uses the free pool if possible. -func (m *machine) alloc(i *syntax.Inst) *thread { - var t *thread - if n := len(m.pool); n > 0 { - t = m.pool[n-1] - m.pool = m.pool[:n-1] - } else { - t = new(thread) - t.cap = make([]int, len(m.matchcap), cap(m.matchcap)) - } - t.inst = i - return t -} - -// A lazyFlag is a lazily-evaluated syntax.EmptyOp, -// for checking zero-width flags like ^ $ \A \z \B \b. -// It records the pair of relevant runes and does not -// determine the implied flags until absolutely necessary -// (most of the time, that means never). -type lazyFlag uint64 - -func newLazyFlag(r1, r2 rune) lazyFlag { - return lazyFlag(uint64(r1)<<32 | uint64(uint32(r2))) -} - -func (f lazyFlag) match(op syntax.EmptyOp) bool { - if op == 0 { - return true - } - r1 := rune(f >> 32) - if op&syntax.EmptyBeginLine != 0 { - if r1 != '\n' && r1 >= 0 { - return false - } - op &^= syntax.EmptyBeginLine - } - if op&syntax.EmptyBeginText != 0 { - if r1 >= 0 { - return false - } - op &^= syntax.EmptyBeginText - } - if op == 0 { - return true - } - r2 := rune(f) - if op&syntax.EmptyEndLine != 0 { - if r2 != '\n' && r2 >= 0 { - return false - } - op &^= syntax.EmptyEndLine - } - if op&syntax.EmptyEndText != 0 { - if r2 >= 0 { - return false - } - op &^= syntax.EmptyEndText - } - if op == 0 { - return true - } - if syntax.IsWordChar(r1) != syntax.IsWordChar(r2) { - op &^= syntax.EmptyWordBoundary - } else { - op &^= syntax.EmptyNoWordBoundary - } - return op == 0 -} - -// match runs the machine over the input starting at pos. -// It reports whether a match was found. -// If so, m.matchcap holds the submatch information. 
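The lazyFlag logic above decides zero-width assertions (^, $, \A, \z, \b, \B) purely from the rune before and the rune after the current position. A minimal standalone sketch of the word-boundary case, using the exported regexp/syntax helper the deleted code relies on (the sketch itself is illustrative, not part of the vendored file):

package main

import (
	"fmt"
	"regexp/syntax"
)

// wordBoundary reports whether \b holds between r1 (the rune before the
// position) and r2 (the rune after it); -1 stands for start or end of text.
// This mirrors the EmptyWordBoundary branch of lazyFlag.match above.
func wordBoundary(r1, r2 rune) bool {
	return syntax.IsWordChar(r1) != syntax.IsWordChar(r2)
}

func main() {
	fmt.Println(wordBoundary(-1, 'a')) // true: a word starts at the beginning of text
	fmt.Println(wordBoundary('a', 'b')) // false: still inside a word
	fmt.Println(wordBoundary('b', ' ')) // true: the word ends here
}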
-func (m *machine) match(i input, pos int) bool { - startCond := m.re.cond - if startCond == ^syntax.EmptyOp(0) { // impossible - return false - } - m.matched = false - for i := range m.matchcap { - m.matchcap[i] = -1 - } - runq, nextq := &m.q0, &m.q1 - r, r1 := endOfText, endOfText - width, width1 := 0, 0 - r, width = i.step(pos) - if r != endOfText { - r1, width1 = i.step(pos + width) - } - var flag lazyFlag - if pos == 0 { - flag = newLazyFlag(-1, r) - } else { - flag = i.context(pos) - } - for { - if len(runq.dense) == 0 { - if startCond&syntax.EmptyBeginText != 0 && pos != 0 { - // Anchored match, past beginning of text. - break - } - if m.matched { - // Have match; finished exploring alternatives. - break - } - if len(m.re.prefix) > 0 && r1 != m.re.prefixRune && i.canCheckPrefix() { - // Match requires literal prefix; fast search for it. - advance := i.index(m.re, pos) - if advance < 0 { - break - } - pos += advance - r, width = i.step(pos) - r1, width1 = i.step(pos + width) - } - } - if !m.matched { - if len(m.matchcap) > 0 { - m.matchcap[0] = pos - } - m.add(runq, uint32(m.p.Start), pos, m.matchcap, &flag, nil) - } - flag = newLazyFlag(r, r1) - m.step(runq, nextq, pos, pos+width, r, &flag) - if width == 0 { - break - } - if len(m.matchcap) == 0 && m.matched { - // Found a match and not paying attention - // to where it is, so any match will do. - break - } - pos += width - r, width = r1, width1 - if r != endOfText { - r1, width1 = i.step(pos + width) - } - runq, nextq = nextq, runq - } - m.clear(nextq) - return m.matched -} - -// clear frees all threads on the thread queue. -func (m *machine) clear(q *queue) { - for _, d := range q.dense { - if d.t != nil { - m.pool = append(m.pool, d.t) - } - } - q.dense = q.dense[:0] -} - -// step executes one step of the machine, running each of the threads -// on runq and appending new threads to nextq. -// The step processes the rune c (which may be endOfText), -// which starts at position pos and ends at nextPos. -// nextCond gives the setting for the empty-width flags after c. -func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond *lazyFlag) { - longest := m.re.longest - for j := 0; j < len(runq.dense); j++ { - d := &runq.dense[j] - t := d.t - if t == nil { - continue - } - if longest && m.matched && len(t.cap) > 0 && m.matchcap[0] < t.cap[0] { - m.pool = append(m.pool, t) - continue - } - i := t.inst - add := false - switch i.Op { - default: - panic("bad inst") - - case syntax.InstMatch: - if len(t.cap) > 0 && (!longest || !m.matched || m.matchcap[1] < pos) { - t.cap[1] = pos - copy(m.matchcap, t.cap) - } - if !longest { - // First-match mode: cut off all lower-priority threads. - for _, d := range runq.dense[j+1:] { - if d.t != nil { - m.pool = append(m.pool, d.t) - } - } - runq.dense = runq.dense[:0] - } - m.matched = true - - case syntax.InstRune: - add = i.MatchRune(c) - case syntax.InstRune1: - add = c == i.Rune[0] - case syntax.InstRuneAny: - add = true - case syntax.InstRuneAnyNotNL: - add = c != '\n' - } - if add { - t = m.add(nextq, i.Out, nextPos, t.cap, nextCond, t) - } - if t != nil { - m.pool = append(m.pool, t) - } - } - runq.dense = runq.dense[:0] -} - -// add adds an entry to q for pc, unless the q already has such an entry. -// It also recursively adds an entry for all instructions reachable from pc by following -// empty-width conditions satisfied by cond. pos gives the current position -// in the input. 
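For intuition, here is a standalone sketch of the sparse/dense trick that the queue type above and the add function below use to deduplicate pcs in O(1); it is illustrative only, with names of my own choosing:

package main

import "fmt"

// sparseSet is a set over [0, n) with O(1) insert, membership test, and
// clear. Correctness never depends on sparse's old contents, which is why
// clear can simply truncate dense (see the research.swtch.com link above).
type sparseSet struct {
	sparse []uint32 // sparse[v] holds v's index in dense (may be stale)
	dense  []uint32 // members, in insertion order
}

func newSparseSet(n int) *sparseSet {
	return &sparseSet{sparse: make([]uint32, n), dense: make([]uint32, 0, n)}
}

func (s *sparseSet) contains(v uint32) bool {
	j := s.sparse[v]
	// v is a member only if its claimed slot in dense really points back at v.
	return j < uint32(len(s.dense)) && s.dense[j] == v
}

func (s *sparseSet) insert(v uint32) {
	if !s.contains(v) {
		s.sparse[v] = uint32(len(s.dense))
		s.dense = append(s.dense, v)
	}
}

func (s *sparseSet) clear() { s.dense = s.dense[:0] }

func main() {
	s := newSparseSet(16)
	s.insert(3)
	s.insert(3)
	fmt.Println(s.contains(3), s.contains(4)) // true false
}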
-func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond *lazyFlag, t *thread) *thread { -Again: - if pc == 0 { - return t - } - if j := q.sparse[pc]; j < uint32(len(q.dense)) && q.dense[j].pc == pc { - return t - } - - j := len(q.dense) - q.dense = q.dense[:j+1] - d := &q.dense[j] - d.t = nil - d.pc = pc - q.sparse[pc] = uint32(j) - - i := &m.p.Inst[pc] - switch i.Op { - default: - panic("unhandled") - case syntax.InstFail: - // nothing - case syntax.InstAlt, syntax.InstAltMatch: - t = m.add(q, i.Out, pos, cap, cond, t) - pc = i.Arg - goto Again - case syntax.InstEmptyWidth: - if cond.match(syntax.EmptyOp(i.Arg)) { - pc = i.Out - goto Again - } - case syntax.InstNop: - pc = i.Out - goto Again - case syntax.InstCapture: - if int(i.Arg) < len(cap) { - opos := cap[i.Arg] - cap[i.Arg] = pos - m.add(q, i.Out, pos, cap, cond, nil) - cap[i.Arg] = opos - } else { - pc = i.Out - goto Again - } - case syntax.InstMatch, syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: - if t == nil { - t = m.alloc(i) - } else { - t.inst = i - } - if len(cap) > 0 && &t.cap[0] != &cap[0] { - copy(t.cap, cap) - } - d.t = t - t = nil - } - return t -} - -type onePassMachine struct { - inputs inputs - matchcap []int -} - -var onePassPool sync.Pool - -func newOnePassMachine() *onePassMachine { - m, ok := onePassPool.Get().(*onePassMachine) - if !ok { - m = new(onePassMachine) - } - return m -} - -func freeOnePassMachine(m *onePassMachine) { - m.inputs.clear() - onePassPool.Put(m) -} - -// doOnePass implements r.doExecute using the one-pass execution engine. -func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap int, dstCap []int) []int { - startCond := re.cond - if startCond == ^syntax.EmptyOp(0) { // impossible - return nil - } - - m := newOnePassMachine() - if cap(m.matchcap) < ncap { - m.matchcap = make([]int, ncap) - } else { - m.matchcap = m.matchcap[:ncap] - } - - matched := false - for i := range m.matchcap { - m.matchcap[i] = -1 - } - - i, _ := m.inputs.init(ir, ib, is) - - r, r1 := endOfText, endOfText - width, width1 := 0, 0 - r, width = i.step(pos) - if r != endOfText { - r1, width1 = i.step(pos + width) - } - var flag lazyFlag - if pos == 0 { - flag = newLazyFlag(-1, r) - } else { - flag = i.context(pos) - } - pc := re.onepass.Start - inst := &re.onepass.Inst[pc] - // If there is a simple literal prefix, skip over it. - if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) && - len(re.prefix) > 0 && i.canCheckPrefix() { - // Match requires literal prefix; fast search for it. 
- if !i.hasPrefix(re) { - goto Return - } - pos += len(re.prefix) - r, width = i.step(pos) - r1, width1 = i.step(pos + width) - flag = i.context(pos) - pc = int(re.prefixEnd) - } - for { - inst = &re.onepass.Inst[pc] - pc = int(inst.Out) - switch inst.Op { - default: - panic("bad inst") - case syntax.InstMatch: - matched = true - if len(m.matchcap) > 0 { - m.matchcap[0] = 0 - m.matchcap[1] = pos - } - goto Return - case syntax.InstRune: - if !inst.MatchRune(r) { - goto Return - } - case syntax.InstRune1: - if r != inst.Rune[0] { - goto Return - } - case syntax.InstRuneAny: - // Nothing - case syntax.InstRuneAnyNotNL: - if r == '\n' { - goto Return - } - // peek at the input rune to see which branch of the Alt to take - case syntax.InstAlt, syntax.InstAltMatch: - pc = int(onePassNext(inst, r)) - continue - case syntax.InstFail: - goto Return - case syntax.InstNop: - continue - case syntax.InstEmptyWidth: - if !flag.match(syntax.EmptyOp(inst.Arg)) { - goto Return - } - continue - case syntax.InstCapture: - if int(inst.Arg) < len(m.matchcap) { - m.matchcap[inst.Arg] = pos - } - continue - } - if width == 0 { - break - } - flag = newLazyFlag(r, r1) - pos += width - r, width = r1, width1 - if r != endOfText { - r1, width1 = i.step(pos + width) - } - } - -Return: - if !matched { - freeOnePassMachine(m) - return nil - } - - dstCap = append(dstCap, m.matchcap...) - freeOnePassMachine(m) - return dstCap -} - -// doMatch reports whether either r, b or s match the regexp. -func (re *Regexp) doMatch(r io.RuneReader, b []byte, s string) bool { - return re.doExecute(r, b, s, 0, 0, nil) != nil -} - -// doExecute finds the leftmost match in the input, appends the position -// of its subexpressions to dstCap and returns dstCap. -// -// nil is returned if no matches are found and non-nil if matches are found. -func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap int, dstCap []int) []int { - if dstCap == nil { - // Make sure 'return dstCap' is non-nil. - dstCap = arrayNoInts[:0:0] - } - - if r == nil && len(b)+len(s) < re.minInputLen { - return nil - } - - if re.onepass != nil { - return re.doOnePass(r, b, s, pos, ncap, dstCap) - } - if r == nil && len(b)+len(s) < re.maxBitStateLen { - return re.backtrack(b, s, pos, ncap, dstCap) - } - - m := re.get() - i, _ := m.inputs.init(r, b, s) - - m.init(ncap) - if !m.match(i, pos) { - re.put(m) - return nil - } - - dstCap = append(dstCap, m.matchcap...) - re.put(m) - return dstCap -} - -// arrayNoInts is returned by doExecute match if nil dstCap is passed -// to it with ncap=0. -var arrayNoInts [0]int diff --git a/vendor/github.com/grafana/regexp/onepass.go b/vendor/github.com/grafana/regexp/onepass.go deleted file mode 100644 index 53cbd958..00000000 --- a/vendor/github.com/grafana/regexp/onepass.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package regexp - -import ( - "regexp/syntax" - "slices" - "strings" - "unicode" - "unicode/utf8" -) - -// "One-pass" regexp execution. -// Some regexps can be analyzed to determine that they never need -// backtracking: they are guaranteed to run in one pass over the string -// without bothering to save all the usual NFA state. -// Detect those and execute them more quickly. - -// A onePassProg is a compiled one-pass regular expression program. -// It is the same as syntax.Prog except for the use of onePassInst. 
-type onePassProg struct { - Inst []onePassInst - Start int // index of start instruction - NumCap int // number of InstCapture insts in re -} - -// A onePassInst is a single instruction in a one-pass regular expression program. -// It is the same as syntax.Inst except for the new 'Next' field. -type onePassInst struct { - syntax.Inst - Next []uint32 -} - -// onePassPrefix returns a literal string that all matches for the -// regexp must start with. Complete is true if the prefix -// is the entire match. Pc is the index of the last rune instruction -// in the string. The onePassPrefix skips over the mandatory -// EmptyBeginText. -func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { - i := &p.Inst[p.Start] - if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 { - return "", i.Op == syntax.InstMatch, uint32(p.Start) - } - pc = i.Out - i = &p.Inst[pc] - for i.Op == syntax.InstNop { - pc = i.Out - i = &p.Inst[pc] - } - // Avoid allocation of buffer if prefix is empty. - if iop(i) != syntax.InstRune || len(i.Rune) != 1 { - return "", i.Op == syntax.InstMatch, uint32(p.Start) - } - - // Have prefix; gather characters. - var buf strings.Builder - for iop(i) == syntax.InstRune && len(i.Rune) == 1 && syntax.Flags(i.Arg)&syntax.FoldCase == 0 && i.Rune[0] != utf8.RuneError { - buf.WriteRune(i.Rune[0]) - pc, i = i.Out, &p.Inst[i.Out] - } - if i.Op == syntax.InstEmptyWidth && - syntax.EmptyOp(i.Arg)&syntax.EmptyEndText != 0 && - p.Inst[i.Out].Op == syntax.InstMatch { - complete = true - } - return buf.String(), complete, pc -} - -// onePassNext selects the next actionable state of the prog, based on the input character. -// It should only be called when i.Op == InstAlt or InstAltMatch, and from the one-pass machine. -// One of the alternates may ultimately lead without input to end of line. If the instruction -// is InstAltMatch the path to the InstMatch is in i.Out, the normal node in i.Next. -func onePassNext(i *onePassInst, r rune) uint32 { - next := i.MatchRunePos(r) - if next >= 0 { - return i.Next[next] - } - if i.Op == syntax.InstAltMatch { - return i.Out - } - return 0 -} - -func iop(i *syntax.Inst) syntax.InstOp { - op := i.Op - switch op { - case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: - op = syntax.InstRune - } - return op -} - -// Sparse Array implementation is used as a queueOnePass. -type queueOnePass struct { - sparse []uint32 - dense []uint32 - size, nextIndex uint32 -} - -func (q *queueOnePass) empty() bool { - return q.nextIndex >= q.size -} - -func (q *queueOnePass) next() (n uint32) { - n = q.dense[q.nextIndex] - q.nextIndex++ - return -} - -func (q *queueOnePass) clear() { - q.size = 0 - q.nextIndex = 0 -} - -func (q *queueOnePass) contains(u uint32) bool { - if u >= uint32(len(q.sparse)) { - return false - } - return q.sparse[u] < q.size && q.dense[q.sparse[u]] == u -} - -func (q *queueOnePass) insert(u uint32) { - if !q.contains(u) { - q.insertNew(u) - } -} - -func (q *queueOnePass) insertNew(u uint32) { - if u >= uint32(len(q.sparse)) { - return - } - q.sparse[u] = q.size - q.dense[q.size] = u - q.size++ -} - -func newQueue(size int) (q *queueOnePass) { - return &queueOnePass{ - sparse: make([]uint32, size), - dense: make([]uint32, size), - } -} - -// mergeRuneSets merges two non-intersecting runesets, and returns the merged result, -// and a NextIp array. The idea is that if a rune matches the OnePassRunes at index -// i, NextIp[i/2] is the target. 
If the input sets intersect, an empty runeset and a -// NextIp array with the single element mergeFailed is returned. -// The code assumes that both inputs contain ordered and non-intersecting rune pairs. -const mergeFailed = uint32(0xffffffff) - -var ( - noRune = []rune{} - noNext = []uint32{mergeFailed} -) - -func mergeRuneSets(leftRunes, rightRunes *[]rune, leftPC, rightPC uint32) ([]rune, []uint32) { - leftLen := len(*leftRunes) - rightLen := len(*rightRunes) - if leftLen&0x1 != 0 || rightLen&0x1 != 0 { - panic("mergeRuneSets odd length []rune") - } - var ( - lx, rx int - ) - merged := make([]rune, 0) - next := make([]uint32, 0) - ok := true - defer func() { - if !ok { - merged = nil - next = nil - } - }() - - ix := -1 - extend := func(newLow *int, newArray *[]rune, pc uint32) bool { - if ix > 0 && (*newArray)[*newLow] <= merged[ix] { - return false - } - merged = append(merged, (*newArray)[*newLow], (*newArray)[*newLow+1]) - *newLow += 2 - ix += 2 - next = append(next, pc) - return true - } - - for lx < leftLen || rx < rightLen { - switch { - case rx >= rightLen: - ok = extend(&lx, leftRunes, leftPC) - case lx >= leftLen: - ok = extend(&rx, rightRunes, rightPC) - case (*rightRunes)[rx] < (*leftRunes)[lx]: - ok = extend(&rx, rightRunes, rightPC) - default: - ok = extend(&lx, leftRunes, leftPC) - } - if !ok { - return noRune, noNext - } - } - return merged, next -} - -// cleanupOnePass drops working memory, and restores certain shortcut instructions. -func cleanupOnePass(prog *onePassProg, original *syntax.Prog) { - for ix, instOriginal := range original.Inst { - switch instOriginal.Op { - case syntax.InstAlt, syntax.InstAltMatch, syntax.InstRune: - case syntax.InstCapture, syntax.InstEmptyWidth, syntax.InstNop, syntax.InstMatch, syntax.InstFail: - prog.Inst[ix].Next = nil - case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: - prog.Inst[ix].Next = nil - prog.Inst[ix] = onePassInst{Inst: instOriginal} - } - } -} - -// onePassCopy creates a copy of the original Prog, as we'll be modifying it. -func onePassCopy(prog *syntax.Prog) *onePassProg { - p := &onePassProg{ - Start: prog.Start, - NumCap: prog.NumCap, - Inst: make([]onePassInst, len(prog.Inst)), - } - for i, inst := range prog.Inst { - p.Inst[i] = onePassInst{Inst: inst} - } - - // rewrites one or more common Prog constructs that enable some otherwise - // non-onepass Progs to be onepass. A:BD (for example) means an InstAlt at - // ip A, that points to ips B & C. 
- // A:BC + B:DA => A:BC + B:CD - // A:BC + B:DC => A:DC + B:DC - for pc := range p.Inst { - switch p.Inst[pc].Op { - default: - continue - case syntax.InstAlt, syntax.InstAltMatch: - // A:Bx + B:Ay - p_A_Other := &p.Inst[pc].Out - p_A_Alt := &p.Inst[pc].Arg - // make sure a target is another Alt - instAlt := p.Inst[*p_A_Alt] - if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) { - p_A_Alt, p_A_Other = p_A_Other, p_A_Alt - instAlt = p.Inst[*p_A_Alt] - if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) { - continue - } - } - instOther := p.Inst[*p_A_Other] - // Analyzing both legs pointing to Alts is for another day - if instOther.Op == syntax.InstAlt || instOther.Op == syntax.InstAltMatch { - // too complicated - continue - } - // simple empty transition loop - // A:BC + B:DA => A:BC + B:DC - p_B_Alt := &p.Inst[*p_A_Alt].Out - p_B_Other := &p.Inst[*p_A_Alt].Arg - patch := false - if instAlt.Out == uint32(pc) { - patch = true - } else if instAlt.Arg == uint32(pc) { - patch = true - p_B_Alt, p_B_Other = p_B_Other, p_B_Alt - } - if patch { - *p_B_Alt = *p_A_Other - } - - // empty transition to common target - // A:BC + B:DC => A:DC + B:DC - if *p_A_Other == *p_B_Alt { - *p_A_Alt = *p_B_Other - } - } - } - return p -} - -var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune} -var anyRune = []rune{0, unicode.MaxRune} - -// makeOnePass creates a onepass Prog, if possible. It is possible if at any alt, -// the match engine can always tell which branch to take. The routine may modify -// p if it is turned into a onepass Prog. If it isn't possible for this to be a -// onepass Prog, the Prog nil is returned. makeOnePass is recursive -// to the size of the Prog. -func makeOnePass(p *onePassProg) *onePassProg { - // If the machine is very long, it's not worth the time to check if we can use one pass. - if len(p.Inst) >= 1000 { - return nil - } - - var ( - instQueue = newQueue(len(p.Inst)) - visitQueue = newQueue(len(p.Inst)) - check func(uint32, []bool) bool - onePassRunes = make([][]rune, len(p.Inst)) - ) - - // check that paths from Alt instructions are unambiguous, and rebuild the new - // program as a onepass program - check = func(pc uint32, m []bool) (ok bool) { - ok = true - inst := &p.Inst[pc] - if visitQueue.contains(pc) { - return - } - visitQueue.insert(pc) - switch inst.Op { - case syntax.InstAlt, syntax.InstAltMatch: - ok = check(inst.Out, m) && check(inst.Arg, m) - // check no-input paths to InstMatch - matchOut := m[inst.Out] - matchArg := m[inst.Arg] - if matchOut && matchArg { - ok = false - break - } - // Match on empty goes in inst.Out - if matchArg { - inst.Out, inst.Arg = inst.Arg, inst.Out - matchOut, matchArg = matchArg, matchOut - } - if matchOut { - m[pc] = true - inst.Op = syntax.InstAltMatch - } - - // build a dispatch operator from the two legs of the alt. - onePassRunes[pc], inst.Next = mergeRuneSets( - &onePassRunes[inst.Out], &onePassRunes[inst.Arg], inst.Out, inst.Arg) - if len(inst.Next) > 0 && inst.Next[0] == mergeFailed { - ok = false - break - } - case syntax.InstCapture, syntax.InstNop: - ok = check(inst.Out, m) - m[pc] = m[inst.Out] - // pass matching runes back through these no-ops. - onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...) - inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) - for i := range inst.Next { - inst.Next[i] = inst.Out - } - case syntax.InstEmptyWidth: - ok = check(inst.Out, m) - m[pc] = m[inst.Out] - onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...) 
- inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) - for i := range inst.Next { - inst.Next[i] = inst.Out - } - case syntax.InstMatch, syntax.InstFail: - m[pc] = inst.Op == syntax.InstMatch - case syntax.InstRune: - m[pc] = false - if len(inst.Next) > 0 { - break - } - instQueue.insert(inst.Out) - if len(inst.Rune) == 0 { - onePassRunes[pc] = []rune{} - inst.Next = []uint32{inst.Out} - break - } - runes := make([]rune, 0) - if len(inst.Rune) == 1 && syntax.Flags(inst.Arg)&syntax.FoldCase != 0 { - r0 := inst.Rune[0] - runes = append(runes, r0, r0) - for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { - runes = append(runes, r1, r1) - } - slices.Sort(runes) - } else { - runes = append(runes, inst.Rune...) - } - onePassRunes[pc] = runes - inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) - for i := range inst.Next { - inst.Next[i] = inst.Out - } - inst.Op = syntax.InstRune - case syntax.InstRune1: - m[pc] = false - if len(inst.Next) > 0 { - break - } - instQueue.insert(inst.Out) - runes := []rune{} - // expand case-folded runes - if syntax.Flags(inst.Arg)&syntax.FoldCase != 0 { - r0 := inst.Rune[0] - runes = append(runes, r0, r0) - for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { - runes = append(runes, r1, r1) - } - slices.Sort(runes) - } else { - runes = append(runes, inst.Rune[0], inst.Rune[0]) - } - onePassRunes[pc] = runes - inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) - for i := range inst.Next { - inst.Next[i] = inst.Out - } - inst.Op = syntax.InstRune - case syntax.InstRuneAny: - m[pc] = false - if len(inst.Next) > 0 { - break - } - instQueue.insert(inst.Out) - onePassRunes[pc] = append([]rune{}, anyRune...) - inst.Next = []uint32{inst.Out} - case syntax.InstRuneAnyNotNL: - m[pc] = false - if len(inst.Next) > 0 { - break - } - instQueue.insert(inst.Out) - onePassRunes[pc] = append([]rune{}, anyRuneNotNL...) - inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) - for i := range inst.Next { - inst.Next[i] = inst.Out - } - } - return - } - - instQueue.clear() - instQueue.insert(uint32(p.Start)) - m := make([]bool, len(p.Inst)) - for !instQueue.empty() { - visitQueue.clear() - pc := instQueue.next() - if !check(pc, m) { - p = nil - break - } - } - if p != nil { - for i := range p.Inst { - p.Inst[i].Rune = onePassRunes[i] - } - } - return p -} - -// compileOnePass returns a new *syntax.Prog suitable for onePass execution if the original Prog -// can be recharacterized as a one-pass regexp program, or syntax.nil if the -// Prog cannot be converted. For a one pass prog, the fundamental condition that must -// be true is: at any InstAlt, there must be no ambiguity about what branch to take. 
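The InstRune and InstRune1 cases above expand a case-folded rune into its full fold orbit via unicode.SimpleFold before building the dispatch table. A minimal standalone sketch of that expansion (illustrative only; foldOrbit is my own name):

package main

import (
	"fmt"
	"unicode"
)

// foldOrbit returns r together with every rune reachable from it through
// unicode.SimpleFold, i.e. the case-equivalent set the one-pass compiler
// sorts into its rune ranges.
func foldOrbit(r rune) []rune {
	orbit := []rune{r}
	for f := unicode.SimpleFold(r); f != r; f = unicode.SimpleFold(f) {
		orbit = append(orbit, f)
	}
	return orbit
}

func main() {
	fmt.Printf("%q\n", foldOrbit('k')) // 'k', U+212A (KELVIN SIGN), 'K'
}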
-func compileOnePass(prog *syntax.Prog) (p *onePassProg) { - if prog.Start == 0 { - return nil - } - // onepass regexp is anchored - if prog.Inst[prog.Start].Op != syntax.InstEmptyWidth || - syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText { - return nil - } - // every instruction leading to InstMatch must be EmptyEndText - for _, inst := range prog.Inst { - opOut := prog.Inst[inst.Out].Op - switch inst.Op { - default: - if opOut == syntax.InstMatch { - return nil - } - case syntax.InstAlt, syntax.InstAltMatch: - if opOut == syntax.InstMatch || prog.Inst[inst.Arg].Op == syntax.InstMatch { - return nil - } - case syntax.InstEmptyWidth: - if opOut == syntax.InstMatch { - if syntax.EmptyOp(inst.Arg)&syntax.EmptyEndText == syntax.EmptyEndText { - continue - } - return nil - } - } - } - // Creates a slightly optimized copy of the original Prog - // that cleans up some Prog idioms that block valid onepass programs - p = onePassCopy(prog) - - // checkAmbiguity on InstAlts, build onepass Prog if possible - p = makeOnePass(p) - - if p != nil { - cleanupOnePass(p, prog) - } - return p -} diff --git a/vendor/github.com/grafana/regexp/regexp.go b/vendor/github.com/grafana/regexp/regexp.go deleted file mode 100644 index d1218ad0..00000000 --- a/vendor/github.com/grafana/regexp/regexp.go +++ /dev/null @@ -1,1304 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package regexp implements regular expression search. -// -// The syntax of the regular expressions accepted is the same -// general syntax used by Perl, Python, and other languages. -// More precisely, it is the syntax accepted by RE2 and described at -// https://golang.org/s/re2syntax, except for \C. -// For an overview of the syntax, see the [regexp/syntax] package. -// -// The regexp implementation provided by this package is -// guaranteed to run in time linear in the size of the input. -// (This is a property not guaranteed by most open source -// implementations of regular expressions.) For more information -// about this property, see -// -// https://swtch.com/~rsc/regexp/regexp1.html -// -// or any book about automata theory. -// -// All characters are UTF-8-encoded code points. -// Following [utf8.DecodeRune], each byte of an invalid UTF-8 sequence -// is treated as if it encoded utf8.RuneError (U+FFFD). -// -// There are 16 methods of [Regexp] that match a regular expression and identify -// the matched text. Their names are matched by this regular expression: -// -// Find(All)?(String)?(Submatch)?(Index)? -// -// If 'All' is present, the routine matches successive non-overlapping -// matches of the entire expression. Empty matches abutting a preceding -// match are ignored. The return value is a slice containing the successive -// return values of the corresponding non-'All' routine. These routines take -// an extra integer argument, n. If n >= 0, the function returns at most n -// matches/submatches; otherwise, it returns all of them. -// -// If 'String' is present, the argument is a string; otherwise it is a slice -// of bytes; return values are adjusted as appropriate. -// -// If 'Submatch' is present, the return value is a slice identifying the -// successive submatches of the expression. 
Submatches are matches of -// parenthesized subexpressions (also known as capturing groups) within the -// regular expression, numbered from left to right in order of opening -// parenthesis. Submatch 0 is the match of the entire expression, submatch 1 is -// the match of the first parenthesized subexpression, and so on. -// -// If 'Index' is present, matches and submatches are identified by byte index -// pairs within the input string: result[2*n:2*n+2] identifies the indexes of -// the nth submatch. The pair for n==0 identifies the match of the entire -// expression. If 'Index' is not present, the match is identified by the text -// of the match/submatch. If an index is negative or text is nil, it means that -// subexpression did not match any string in the input. For 'String' versions -// an empty string means either no match or an empty match. -// -// There is also a subset of the methods that can be applied to text read -// from a RuneReader: -// -// MatchReader, FindReaderIndex, FindReaderSubmatchIndex -// -// This set may grow. Note that regular expression matches may need to -// examine text beyond the text returned by a match, so the methods that -// match text from a RuneReader may read arbitrarily far into the input -// before returning. -// -// (There are a few other methods that do not match this pattern.) -package regexp - -import ( - "bytes" - "io" - "regexp/syntax" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Regexp is the representation of a compiled regular expression. -// A Regexp is safe for concurrent use by multiple goroutines, -// except for configuration methods, such as [Regexp.Longest]. -type Regexp struct { - expr string // as passed to Compile - prog *syntax.Prog // compiled program - onepass *onePassProg // onepass program or nil - numSubexp int - maxBitStateLen int - subexpNames []string - prefix string // required prefix in unanchored matches - prefixBytes []byte // prefix, as a []byte - prefixRune rune // first rune in prefix - prefixEnd uint32 // pc for last rune in prefix - mpool int // pool for machines - matchcap int // size of recorded match lengths - prefixComplete bool // prefix is the entire regexp - cond syntax.EmptyOp // empty-width conditions required at start of match - minInputLen int // minimum length of the input in bytes - - // This field can be modified by the Longest method, - // but it is otherwise read-only. - longest bool // whether regexp prefers leftmost-longest match -} - -// String returns the source text used to compile the regular expression. -func (re *Regexp) String() string { - return re.expr -} - -// Copy returns a new [Regexp] object copied from re. -// Calling [Regexp.Longest] on one copy does not affect another. -// -// Deprecated: In earlier releases, when using a [Regexp] in multiple goroutines, -// giving each goroutine its own copy helped to avoid lock contention. -// As of Go 1.12, using Copy is no longer necessary to avoid lock contention. -// Copy may still be appropriate if the reason for its use is to make -// two copies with different [Regexp.Longest] settings. -func (re *Regexp) Copy() *Regexp { - re2 := *re - return &re2 -} - -// Compile parses a regular expression and returns, if successful, -// a [Regexp] object that can be used to match against text. -// -// When matching against text, the regexp returns a match that -// begins as early as possible in the input (leftmost), and among those -// it chooses the one that a backtracking search would have found first. 
-// This so-called leftmost-first matching is the same semantics -// that Perl, Python, and other implementations use, although this -// package implements it without the expense of backtracking. -// For POSIX leftmost-longest matching, see [CompilePOSIX]. -func Compile(expr string) (*Regexp, error) { - return compile(expr, syntax.Perl, false) -} - -// CompilePOSIX is like [Compile] but restricts the regular expression -// to POSIX ERE (egrep) syntax and changes the match semantics to -// leftmost-longest. -// -// That is, when matching against text, the regexp returns a match that -// begins as early as possible in the input (leftmost), and among those -// it chooses a match that is as long as possible. -// This so-called leftmost-longest matching is the same semantics -// that early regular expression implementations used and that POSIX -// specifies. -// -// However, there can be multiple leftmost-longest matches, with different -// submatch choices, and here this package diverges from POSIX. -// Among the possible leftmost-longest matches, this package chooses -// the one that a backtracking search would have found first, while POSIX -// specifies that the match be chosen to maximize the length of the first -// subexpression, then the second, and so on from left to right. -// The POSIX rule is computationally prohibitive and not even well-defined. -// See https://swtch.com/~rsc/regexp/regexp2.html#posix for details. -func CompilePOSIX(expr string) (*Regexp, error) { - return compile(expr, syntax.POSIX, true) -} - -// Longest makes future searches prefer the leftmost-longest match. -// That is, when matching against text, the regexp returns a match that -// begins as early as possible in the input (leftmost), and among those -// it chooses a match that is as long as possible. -// This method modifies the [Regexp] and may not be called concurrently -// with any other methods. -func (re *Regexp) Longest() { - re.longest = true -} - -func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) { - re, err := syntax.Parse(expr, mode) - if err != nil { - return nil, err - } - maxCap := re.MaxCap() - capNames := re.CapNames() - - re = re.Simplify() - prog, err := syntax.Compile(re) - if err != nil { - return nil, err - } - matchcap := prog.NumCap - if matchcap < 2 { - matchcap = 2 - } - regexp := &Regexp{ - expr: expr, - prog: prog, - onepass: compileOnePass(prog), - numSubexp: maxCap, - subexpNames: capNames, - cond: prog.StartCond(), - longest: longest, - matchcap: matchcap, - minInputLen: minInputLen(re), - } - if regexp.onepass == nil { - regexp.prefix, regexp.prefixComplete = prog.Prefix() - regexp.maxBitStateLen = maxBitStateLen(prog) - } else { - regexp.prefix, regexp.prefixComplete, regexp.prefixEnd = onePassPrefix(prog) - } - if regexp.prefix != "" { - // TODO(rsc): Remove this allocation by adding - // IndexString to package bytes. - regexp.prefixBytes = []byte(regexp.prefix) - regexp.prefixRune, _ = utf8.DecodeRuneInString(regexp.prefix) - } - - n := len(prog.Inst) - i := 0 - for matchSize[i] != 0 && matchSize[i] < n { - i++ - } - regexp.mpool = i - - return regexp, nil -} - -// Pools of *machine for use during (*Regexp).doExecute, -// split up by the size of the execution queues. -// matchPool[i] machines have queue size matchSize[i]. -// On a 64-bit system each queue entry is 16 bytes, -// so matchPool[0] has 16*2*128 = 4kB queues, etc. -// The final matchPool is a catch-all for very large queues. 
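A small usage sketch of the two match semantics described above, leftmost-first versus leftmost-longest; it is written against the standard library regexp import path, which this vendored fork is assumed to mirror:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Leftmost-first (Perl-style) semantics from Compile: among matches at
	// the same leftmost position, the alternative a backtracking search
	// would try first wins, so "a|ab" prefers "a".
	re := regexp.MustCompile("a|ab")
	fmt.Println(re.FindString("ab")) // a

	// Leftmost-longest (POSIX) semantics via Longest or CompilePOSIX.
	re.Longest()
	fmt.Println(re.FindString("ab"))                              // ab
	fmt.Println(regexp.MustCompilePOSIX("a|ab").FindString("ab")) // ab
}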
-var ( - matchSize = [...]int{128, 512, 2048, 16384, 0} - matchPool [len(matchSize)]sync.Pool -) - -// get returns a machine to use for matching re. -// It uses the re's machine cache if possible, to avoid -// unnecessary allocation. -func (re *Regexp) get() *machine { - m, ok := matchPool[re.mpool].Get().(*machine) - if !ok { - m = new(machine) - } - m.re = re - m.p = re.prog - if cap(m.matchcap) < re.matchcap { - m.matchcap = make([]int, re.matchcap) - for _, t := range m.pool { - t.cap = make([]int, re.matchcap) - } - } - - // Allocate queues if needed. - // Or reallocate, for "large" match pool. - n := matchSize[re.mpool] - if n == 0 { // large pool - n = len(re.prog.Inst) - } - if len(m.q0.sparse) < n { - m.q0 = queue{make([]uint32, n), make([]entry, 0, n)} - m.q1 = queue{make([]uint32, n), make([]entry, 0, n)} - } - return m -} - -// put returns a machine to the correct machine pool. -func (re *Regexp) put(m *machine) { - m.re = nil - m.p = nil - m.inputs.clear() - matchPool[re.mpool].Put(m) -} - -// minInputLen walks the regexp to find the minimum length of any matchable input. -func minInputLen(re *syntax.Regexp) int { - switch re.Op { - default: - return 0 - case syntax.OpAnyChar, syntax.OpAnyCharNotNL, syntax.OpCharClass: - return 1 - case syntax.OpLiteral: - l := 0 - for _, r := range re.Rune { - if r == utf8.RuneError { - l++ - } else { - l += utf8.RuneLen(r) - } - } - return l - case syntax.OpCapture, syntax.OpPlus: - return minInputLen(re.Sub[0]) - case syntax.OpRepeat: - return re.Min * minInputLen(re.Sub[0]) - case syntax.OpConcat: - l := 0 - for _, sub := range re.Sub { - l += minInputLen(sub) - } - return l - case syntax.OpAlternate: - l := minInputLen(re.Sub[0]) - var lnext int - for _, sub := range re.Sub[1:] { - lnext = minInputLen(sub) - if lnext < l { - l = lnext - } - } - return l - } -} - -// MustCompile is like [Compile] but panics if the expression cannot be parsed. -// It simplifies safe initialization of global variables holding compiled regular -// expressions. -func MustCompile(str string) *Regexp { - regexp, err := Compile(str) - if err != nil { - panic(`regexp: Compile(` + quote(str) + `): ` + err.Error()) - } - return regexp -} - -// MustCompilePOSIX is like [CompilePOSIX] but panics if the expression cannot be parsed. -// It simplifies safe initialization of global variables holding compiled regular -// expressions. -func MustCompilePOSIX(str string) *Regexp { - regexp, err := CompilePOSIX(str) - if err != nil { - panic(`regexp: CompilePOSIX(` + quote(str) + `): ` + err.Error()) - } - return regexp -} - -func quote(s string) string { - if strconv.CanBackquote(s) { - return "`" + s + "`" - } - return strconv.Quote(s) -} - -// NumSubexp returns the number of parenthesized subexpressions in this [Regexp]. -func (re *Regexp) NumSubexp() int { - return re.numSubexp -} - -// SubexpNames returns the names of the parenthesized subexpressions -// in this [Regexp]. The name for the first sub-expression is names[1], -// so that if m is a match slice, the name for m[i] is SubexpNames()[i]. -// Since the Regexp as a whole cannot be named, names[0] is always -// the empty string. The slice should not be modified. -func (re *Regexp) SubexpNames() []string { - return re.subexpNames -} - -// SubexpIndex returns the index of the first subexpression with the given name, -// or -1 if there is no subexpression with that name. -// -// Note that multiple subexpressions can be written using the same name, as in -// (?Pa+)(?Pb+), which declares two subexpressions named "bob". 
-// In this case, SubexpIndex returns the index of the leftmost such subexpression -// in the regular expression. -func (re *Regexp) SubexpIndex(name string) int { - if name != "" { - for i, s := range re.subexpNames { - if name == s { - return i - } - } - } - return -1 -} - -const endOfText rune = -1 - -// input abstracts different representations of the input text. It provides -// one-character lookahead. -type input interface { - step(pos int) (r rune, width int) // advance one rune - canCheckPrefix() bool // can we look ahead without losing info? - hasPrefix(re *Regexp) bool - index(re *Regexp, pos int) int - context(pos int) lazyFlag -} - -// inputString scans a string. -type inputString struct { - str string -} - -func (i *inputString) step(pos int) (rune, int) { - if pos < len(i.str) { - c := i.str[pos] - if c < utf8.RuneSelf { - return rune(c), 1 - } - return utf8.DecodeRuneInString(i.str[pos:]) - } - return endOfText, 0 -} - -func (i *inputString) canCheckPrefix() bool { - return true -} - -func (i *inputString) hasPrefix(re *Regexp) bool { - return strings.HasPrefix(i.str, re.prefix) -} - -func (i *inputString) index(re *Regexp, pos int) int { - return strings.Index(i.str[pos:], re.prefix) -} - -func (i *inputString) context(pos int) lazyFlag { - r1, r2 := endOfText, endOfText - // 0 < pos && pos <= len(i.str) - if uint(pos-1) < uint(len(i.str)) { - r1 = rune(i.str[pos-1]) - if r1 >= utf8.RuneSelf { - r1, _ = utf8.DecodeLastRuneInString(i.str[:pos]) - } - } - // 0 <= pos && pos < len(i.str) - if uint(pos) < uint(len(i.str)) { - r2 = rune(i.str[pos]) - if r2 >= utf8.RuneSelf { - r2, _ = utf8.DecodeRuneInString(i.str[pos:]) - } - } - return newLazyFlag(r1, r2) -} - -// inputBytes scans a byte slice. -type inputBytes struct { - str []byte -} - -func (i *inputBytes) step(pos int) (rune, int) { - if pos < len(i.str) { - c := i.str[pos] - if c < utf8.RuneSelf { - return rune(c), 1 - } - return utf8.DecodeRune(i.str[pos:]) - } - return endOfText, 0 -} - -func (i *inputBytes) canCheckPrefix() bool { - return true -} - -func (i *inputBytes) hasPrefix(re *Regexp) bool { - return bytes.HasPrefix(i.str, re.prefixBytes) -} - -func (i *inputBytes) index(re *Regexp, pos int) int { - return bytes.Index(i.str[pos:], re.prefixBytes) -} - -func (i *inputBytes) context(pos int) lazyFlag { - r1, r2 := endOfText, endOfText - // 0 < pos && pos <= len(i.str) - if uint(pos-1) < uint(len(i.str)) { - r1 = rune(i.str[pos-1]) - if r1 >= utf8.RuneSelf { - r1, _ = utf8.DecodeLastRune(i.str[:pos]) - } - } - // 0 <= pos && pos < len(i.str) - if uint(pos) < uint(len(i.str)) { - r2 = rune(i.str[pos]) - if r2 >= utf8.RuneSelf { - r2, _ = utf8.DecodeRune(i.str[pos:]) - } - } - return newLazyFlag(r1, r2) -} - -// inputReader scans a RuneReader. -type inputReader struct { - r io.RuneReader - atEOT bool - pos int -} - -func (i *inputReader) step(pos int) (rune, int) { - if !i.atEOT && pos != i.pos { - return endOfText, 0 - - } - r, w, err := i.r.ReadRune() - if err != nil { - i.atEOT = true - return endOfText, 0 - } - i.pos += w - return r, w -} - -func (i *inputReader) canCheckPrefix() bool { - return false -} - -func (i *inputReader) hasPrefix(re *Regexp) bool { - return false -} - -func (i *inputReader) index(re *Regexp, pos int) int { - return -1 -} - -func (i *inputReader) context(pos int) lazyFlag { - return 0 // not used -} - -// LiteralPrefix returns a literal string that must begin any match -// of the regular expression re. 
It returns the boolean true if the -// literal string comprises the entire regular expression. -func (re *Regexp) LiteralPrefix() (prefix string, complete bool) { - return re.prefix, re.prefixComplete -} - -// MatchReader reports whether the text returned by the [io.RuneReader] -// contains any match of the regular expression re. -func (re *Regexp) MatchReader(r io.RuneReader) bool { - return re.doMatch(r, nil, "") -} - -// MatchString reports whether the string s -// contains any match of the regular expression re. -func (re *Regexp) MatchString(s string) bool { - return re.doMatch(nil, nil, s) -} - -// Match reports whether the byte slice b -// contains any match of the regular expression re. -func (re *Regexp) Match(b []byte) bool { - return re.doMatch(nil, b, "") -} - -// MatchReader reports whether the text returned by the RuneReader -// contains any match of the regular expression pattern. -// More complicated queries need to use [Compile] and the full [Regexp] interface. -func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) { - re, err := Compile(pattern) - if err != nil { - return false, err - } - return re.MatchReader(r), nil -} - -// MatchString reports whether the string s -// contains any match of the regular expression pattern. -// More complicated queries need to use [Compile] and the full [Regexp] interface. -func MatchString(pattern string, s string) (matched bool, err error) { - re, err := Compile(pattern) - if err != nil { - return false, err - } - return re.MatchString(s), nil -} - -// Match reports whether the byte slice b -// contains any match of the regular expression pattern. -// More complicated queries need to use [Compile] and the full [Regexp] interface. -func Match(pattern string, b []byte) (matched bool, err error) { - re, err := Compile(pattern) - if err != nil { - return false, err - } - return re.Match(b), nil -} - -// ReplaceAllString returns a copy of src, replacing matches of the [Regexp] -// with the replacement string repl. -// Inside repl, $ signs are interpreted as in [Regexp.Expand]. -func (re *Regexp) ReplaceAllString(src, repl string) string { - n := 2 - if strings.Contains(repl, "$") { - n = 2 * (re.numSubexp + 1) - } - b := re.replaceAll(nil, src, n, func(dst []byte, match []int) []byte { - return re.expand(dst, repl, nil, src, match) - }) - return string(b) -} - -// ReplaceAllLiteralString returns a copy of src, replacing matches of the [Regexp] -// with the replacement string repl. The replacement repl is substituted directly, -// without using [Regexp.Expand]. -func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { - return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { - return append(dst, repl...) - })) -} - -// ReplaceAllStringFunc returns a copy of src in which all matches of the -// [Regexp] have been replaced by the return value of function repl applied -// to the matched substring. The replacement returned by repl is substituted -// directly, without using [Regexp.Expand]. -func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { - b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { - return append(dst, repl(src[match[0]:match[1]])...) 
- }) - return string(b) -} - -func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst []byte, m []int) []byte) []byte { - lastMatchEnd := 0 // end position of the most recent match - searchPos := 0 // position where we next look for a match - var buf []byte - var endPos int - if bsrc != nil { - endPos = len(bsrc) - } else { - endPos = len(src) - } - if nmatch > re.prog.NumCap { - nmatch = re.prog.NumCap - } - - var dstCap [2]int - for searchPos <= endPos { - a := re.doExecute(nil, bsrc, src, searchPos, nmatch, dstCap[:0]) - if len(a) == 0 { - break // no more matches - } - - // Copy the unmatched characters before this match. - if bsrc != nil { - buf = append(buf, bsrc[lastMatchEnd:a[0]]...) - } else { - buf = append(buf, src[lastMatchEnd:a[0]]...) - } - - // Now insert a copy of the replacement string, but not for a - // match of the empty string immediately after another match. - // (Otherwise, we get double replacement for patterns that - // match both empty and nonempty strings.) - if a[1] > lastMatchEnd || a[0] == 0 { - buf = repl(buf, a) - } - lastMatchEnd = a[1] - - // Advance past this match; always advance at least one character. - var width int - if bsrc != nil { - _, width = utf8.DecodeRune(bsrc[searchPos:]) - } else { - _, width = utf8.DecodeRuneInString(src[searchPos:]) - } - if searchPos+width > a[1] { - searchPos += width - } else if searchPos+1 > a[1] { - // This clause is only needed at the end of the input - // string. In that case, DecodeRuneInString returns width=0. - searchPos++ - } else { - searchPos = a[1] - } - } - - // Copy the unmatched characters after the last match. - if bsrc != nil { - buf = append(buf, bsrc[lastMatchEnd:]...) - } else { - buf = append(buf, src[lastMatchEnd:]...) - } - - return buf -} - -// ReplaceAll returns a copy of src, replacing matches of the [Regexp] -// with the replacement text repl. -// Inside repl, $ signs are interpreted as in [Regexp.Expand]. -func (re *Regexp) ReplaceAll(src, repl []byte) []byte { - n := 2 - if bytes.IndexByte(repl, '$') >= 0 { - n = 2 * (re.numSubexp + 1) - } - srepl := "" - b := re.replaceAll(src, "", n, func(dst []byte, match []int) []byte { - if len(srepl) != len(repl) { - srepl = string(repl) - } - return re.expand(dst, srepl, src, "", match) - }) - return b -} - -// ReplaceAllLiteral returns a copy of src, replacing matches of the [Regexp] -// with the replacement bytes repl. The replacement repl is substituted directly, -// without using [Regexp.Expand]. -func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { - return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { - return append(dst, repl...) - }) -} - -// ReplaceAllFunc returns a copy of src in which all matches of the -// [Regexp] have been replaced by the return value of function repl applied -// to the matched byte slice. The replacement returned by repl is substituted -// directly, without using [Regexp.Expand]. -func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { - return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { - return append(dst, repl(src[match[0]:match[1]])...) - }) -} - -// Bitmap used by func special to check whether a character needs to be escaped. -var specialBytes [16]byte - -// special reports whether byte b needs to be escaped by QuoteMeta. 
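An illustrative sketch of the $-expansion behaviour documented for the Replace family above, again using the standard regexp import path that this vendored fork is assumed to mirror:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`(\w+)@(\w+)\.com`)

	// $1 and $2 are interpreted as in Expand; ${name} also works for named groups.
	fmt.Println(re.ReplaceAllString("mail bob@example.com", "$2/$1"))
	// Output: mail example/bob

	// ReplaceAllLiteralString performs no $ expansion at all.
	fmt.Println(re.ReplaceAllLiteralString("mail bob@example.com", "$2/$1"))
	// Output: mail $2/$1
}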
-func special(b byte) bool { - return b < utf8.RuneSelf && specialBytes[b%16]&(1<<(b/16)) != 0 -} - -func init() { - for _, b := range []byte(`\.+*?()|[]{}^$`) { - specialBytes[b%16] |= 1 << (b / 16) - } -} - -// QuoteMeta returns a string that escapes all regular expression metacharacters -// inside the argument text; the returned string is a regular expression matching -// the literal text. -func QuoteMeta(s string) string { - // A byte loop is correct because all metacharacters are ASCII. - var i int - for i = 0; i < len(s); i++ { - if special(s[i]) { - break - } - } - // No meta characters found, so return original string. - if i >= len(s) { - return s - } - - b := make([]byte, 2*len(s)-i) - copy(b, s[:i]) - j := i - for ; i < len(s); i++ { - if special(s[i]) { - b[j] = '\\' - j++ - } - b[j] = s[i] - j++ - } - return string(b[:j]) -} - -// The number of capture values in the program may correspond -// to fewer capturing expressions than are in the regexp. -// For example, "(a){0}" turns into an empty program, so the -// maximum capture in the program is 0 but we need to return -// an expression for \1. Pad appends -1s to the slice a as needed. -func (re *Regexp) pad(a []int) []int { - if a == nil { - // No match. - return nil - } - n := (1 + re.numSubexp) * 2 - for len(a) < n { - a = append(a, -1) - } - return a -} - -// allMatches calls deliver at most n times -// with the location of successive matches in the input text. -// The input text is b if non-nil, otherwise s. -func (re *Regexp) allMatches(s string, b []byte, n int, deliver func([]int)) { - var end int - if b == nil { - end = len(s) - } else { - end = len(b) - } - - for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; { - matches := re.doExecute(nil, b, s, pos, re.prog.NumCap, nil) - if len(matches) == 0 { - break - } - - accept := true - if matches[1] == pos { - // We've found an empty match. - if matches[0] == prevMatchEnd { - // We don't allow an empty match right - // after a previous match, so ignore it. - accept = false - } - var width int - if b == nil { - is := inputString{str: s} - _, width = is.step(pos) - } else { - ib := inputBytes{str: b} - _, width = ib.step(pos) - } - if width > 0 { - pos += width - } else { - pos = end + 1 - } - } else { - pos = matches[1] - } - prevMatchEnd = matches[1] - - if accept { - deliver(re.pad(matches)) - i++ - } - } -} - -// Find returns a slice holding the text of the leftmost match in b of the regular expression. -// A return value of nil indicates no match. -func (re *Regexp) Find(b []byte) []byte { - var dstCap [2]int - a := re.doExecute(nil, b, "", 0, 2, dstCap[:0]) - if a == nil { - return nil - } - return b[a[0]:a[1]:a[1]] -} - -// FindIndex returns a two-element slice of integers defining the location of -// the leftmost match in b of the regular expression. The match itself is at -// b[loc[0]:loc[1]]. -// A return value of nil indicates no match. -func (re *Regexp) FindIndex(b []byte) (loc []int) { - a := re.doExecute(nil, b, "", 0, 2, nil) - if a == nil { - return nil - } - return a[0:2] -} - -// FindString returns a string holding the text of the leftmost match in s of the regular -// expression. If there is no match, the return value is an empty string, -// but it will also be empty if the regular expression successfully matches -// an empty string. Use [Regexp.FindStringIndex] or [Regexp.FindStringSubmatch] if it is -// necessary to distinguish these cases. 
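A short sketch tying together QuoteMeta and the Find/Index variants documented above (standard regexp import path; the fork is assumed to mirror it):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// QuoteMeta escapes every metacharacter, so untrusted input can be
	// matched literally inside a larger pattern.
	quoted := regexp.QuoteMeta("1.5-2.0?")
	fmt.Println(quoted) // 1\.5-2\.0\?

	re := regexp.MustCompile(quoted)
	fmt.Println(re.FindString("versions 1.5-2.0? and up"))      // 1.5-2.0?
	fmt.Println(re.FindStringIndex("versions 1.5-2.0? and up")) // [9 17]
}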
-func (re *Regexp) FindString(s string) string { - var dstCap [2]int - a := re.doExecute(nil, nil, s, 0, 2, dstCap[:0]) - if a == nil { - return "" - } - return s[a[0]:a[1]] -} - -// FindStringIndex returns a two-element slice of integers defining the -// location of the leftmost match in s of the regular expression. The match -// itself is at s[loc[0]:loc[1]]. -// A return value of nil indicates no match. -func (re *Regexp) FindStringIndex(s string) (loc []int) { - a := re.doExecute(nil, nil, s, 0, 2, nil) - if a == nil { - return nil - } - return a[0:2] -} - -// FindReaderIndex returns a two-element slice of integers defining the -// location of the leftmost match of the regular expression in text read from -// the [io.RuneReader]. The match text was found in the input stream at -// byte offset loc[0] through loc[1]-1. -// A return value of nil indicates no match. -func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) { - a := re.doExecute(r, nil, "", 0, 2, nil) - if a == nil { - return nil - } - return a[0:2] -} - -// FindSubmatch returns a slice of slices holding the text of the leftmost -// match of the regular expression in b and the matches, if any, of its -// subexpressions, as defined by the 'Submatch' descriptions in the package -// comment. -// A return value of nil indicates no match. -func (re *Regexp) FindSubmatch(b []byte) [][]byte { - var dstCap [4]int - a := re.doExecute(nil, b, "", 0, re.prog.NumCap, dstCap[:0]) - if a == nil { - return nil - } - ret := make([][]byte, 1+re.numSubexp) - for i := range ret { - if 2*i < len(a) && a[2*i] >= 0 { - ret[i] = b[a[2*i]:a[2*i+1]:a[2*i+1]] - } - } - return ret -} - -// Expand appends template to dst and returns the result; during the -// append, Expand replaces variables in the template with corresponding -// matches drawn from src. The match slice should have been returned by -// [Regexp.FindSubmatchIndex]. -// -// In the template, a variable is denoted by a substring of the form -// $name or ${name}, where name is a non-empty sequence of letters, -// digits, and underscores. A purely numeric name like $1 refers to -// the submatch with the corresponding index; other names refer to -// capturing parentheses named with the (?P...) syntax. A -// reference to an out of range or unmatched index or a name that is not -// present in the regular expression is replaced with an empty slice. -// -// In the $name form, name is taken to be as long as possible: $1x is -// equivalent to ${1x}, not ${1}x, and, $10 is equivalent to ${10}, not ${1}0. -// -// To insert a literal $ in the output, use $$ in the template. -func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte { - return re.expand(dst, string(template), src, "", match) -} - -// ExpandString is like [Regexp.Expand] but the template and source are strings. -// It appends to and returns a byte slice in order to give the calling -// code control over allocation. -func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte { - return re.expand(dst, template, nil, src, match) -} - -func (re *Regexp) expand(dst []byte, template string, bsrc []byte, src string, match []int) []byte { - for len(template) > 0 { - before, after, ok := strings.Cut(template, "$") - if !ok { - break - } - dst = append(dst, before...) - template = after - if template != "" && template[0] == '$' { - // Treat $$ as $. 
- dst = append(dst, '$') - template = template[1:] - continue - } - name, num, rest, ok := extract(template) - if !ok { - // Malformed; treat $ as raw text. - dst = append(dst, '$') - continue - } - template = rest - if num >= 0 { - if 2*num+1 < len(match) && match[2*num] >= 0 { - if bsrc != nil { - dst = append(dst, bsrc[match[2*num]:match[2*num+1]]...) - } else { - dst = append(dst, src[match[2*num]:match[2*num+1]]...) - } - } - } else { - for i, namei := range re.subexpNames { - if name == namei && 2*i+1 < len(match) && match[2*i] >= 0 { - if bsrc != nil { - dst = append(dst, bsrc[match[2*i]:match[2*i+1]]...) - } else { - dst = append(dst, src[match[2*i]:match[2*i+1]]...) - } - break - } - } - } - } - dst = append(dst, template...) - return dst -} - -// extract returns the name from a leading "name" or "{name}" in str. -// (The $ has already been removed by the caller.) -// If it is a number, extract returns num set to that number; otherwise num = -1. -func extract(str string) (name string, num int, rest string, ok bool) { - if str == "" { - return - } - brace := false - if str[0] == '{' { - brace = true - str = str[1:] - } - i := 0 - for i < len(str) { - rune, size := utf8.DecodeRuneInString(str[i:]) - if !unicode.IsLetter(rune) && !unicode.IsDigit(rune) && rune != '_' { - break - } - i += size - } - if i == 0 { - // empty name is not okay - return - } - name = str[:i] - if brace { - if i >= len(str) || str[i] != '}' { - // missing closing brace - return - } - i++ - } - - // Parse number. - num = 0 - for i := 0; i < len(name); i++ { - if name[i] < '0' || '9' < name[i] || num >= 1e8 { - num = -1 - break - } - num = num*10 + int(name[i]) - '0' - } - // Disallow leading zeros. - if name[0] == '0' && len(name) > 1 { - num = -1 - } - - rest = str[i:] - ok = true - return -} - -// FindSubmatchIndex returns a slice holding the index pairs identifying the -// leftmost match of the regular expression in b and the matches, if any, of -// its subexpressions, as defined by the 'Submatch' and 'Index' descriptions -// in the package comment. -// A return value of nil indicates no match. -func (re *Regexp) FindSubmatchIndex(b []byte) []int { - return re.pad(re.doExecute(nil, b, "", 0, re.prog.NumCap, nil)) -} - -// FindStringSubmatch returns a slice of strings holding the text of the -// leftmost match of the regular expression in s and the matches, if any, of -// its subexpressions, as defined by the 'Submatch' description in the -// package comment. -// A return value of nil indicates no match. -func (re *Regexp) FindStringSubmatch(s string) []string { - var dstCap [4]int - a := re.doExecute(nil, nil, s, 0, re.prog.NumCap, dstCap[:0]) - if a == nil { - return nil - } - ret := make([]string, 1+re.numSubexp) - for i := range ret { - if 2*i < len(a) && a[2*i] >= 0 { - ret[i] = s[a[2*i]:a[2*i+1]] - } - } - return ret -} - -// FindStringSubmatchIndex returns a slice holding the index pairs -// identifying the leftmost match of the regular expression in s and the -// matches, if any, of its subexpressions, as defined by the 'Submatch' and -// 'Index' descriptions in the package comment. -// A return value of nil indicates no match. 
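A sketch of the Expand template syntax documented above, driven by FindAllStringSubmatchIndex and named capture groups (standard regexp import path; the fork is assumed to mirror it):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	content := "name=alice\nage=30"
	re := regexp.MustCompile(`(?P<key>\w+)=(?P<value>\w+)`)
	template := "$key is ${value}\n"

	var result []byte
	for _, m := range re.FindAllStringSubmatchIndex(content, -1) {
		// ExpandString fills $key and ${value} from the index pairs in m.
		result = re.ExpandString(result, template, content, m)
	}
	fmt.Print(string(result))
	// name is alice
	// age is 30
}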
-func (re *Regexp) FindStringSubmatchIndex(s string) []int { - return re.pad(re.doExecute(nil, nil, s, 0, re.prog.NumCap, nil)) -} - -// FindReaderSubmatchIndex returns a slice holding the index pairs -// identifying the leftmost match of the regular expression of text read by -// the [io.RuneReader], and the matches, if any, of its subexpressions, as defined -// by the 'Submatch' and 'Index' descriptions in the package comment. A -// return value of nil indicates no match. -func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { - return re.pad(re.doExecute(r, nil, "", 0, re.prog.NumCap, nil)) -} - -const startSize = 10 // The size at which to start a slice in the 'All' routines. - -// FindAll is the 'All' version of [Regexp.Find]; it returns a slice of all successive -// matches of the expression, as defined by the 'All' description in the -// package comment. -// A return value of nil indicates no match. -func (re *Regexp) FindAll(b []byte, n int) [][]byte { - if n < 0 { - n = len(b) + 1 - } - var result [][]byte - re.allMatches("", b, n, func(match []int) { - if result == nil { - result = make([][]byte, 0, startSize) - } - result = append(result, b[match[0]:match[1]:match[1]]) - }) - return result -} - -// FindAllIndex is the 'All' version of [Regexp.FindIndex]; it returns a slice of all -// successive matches of the expression, as defined by the 'All' description -// in the package comment. -// A return value of nil indicates no match. -func (re *Regexp) FindAllIndex(b []byte, n int) [][]int { - if n < 0 { - n = len(b) + 1 - } - var result [][]int - re.allMatches("", b, n, func(match []int) { - if result == nil { - result = make([][]int, 0, startSize) - } - result = append(result, match[0:2]) - }) - return result -} - -// FindAllString is the 'All' version of [Regexp.FindString]; it returns a slice of all -// successive matches of the expression, as defined by the 'All' description -// in the package comment. -// A return value of nil indicates no match. -func (re *Regexp) FindAllString(s string, n int) []string { - if n < 0 { - n = len(s) + 1 - } - var result []string - re.allMatches(s, nil, n, func(match []int) { - if result == nil { - result = make([]string, 0, startSize) - } - result = append(result, s[match[0]:match[1]]) - }) - return result -} - -// FindAllStringIndex is the 'All' version of [Regexp.FindStringIndex]; it returns a -// slice of all successive matches of the expression, as defined by the 'All' -// description in the package comment. -// A return value of nil indicates no match. -func (re *Regexp) FindAllStringIndex(s string, n int) [][]int { - if n < 0 { - n = len(s) + 1 - } - var result [][]int - re.allMatches(s, nil, n, func(match []int) { - if result == nil { - result = make([][]int, 0, startSize) - } - result = append(result, match[0:2]) - }) - return result -} - -// FindAllSubmatch is the 'All' version of [Regexp.FindSubmatch]; it returns a slice -// of all successive matches of the expression, as defined by the 'All' -// description in the package comment. -// A return value of nil indicates no match. 
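The 'All' routines above share one n convention: a negative n returns every successive non-overlapping match, while a positive n caps the count. A small sketch using the standard library regexp package (the vendored fork exposes the same methods):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`\d+`)
	s := "a1 b22 c333 d4444"

	fmt.Println(re.FindAllString(s, -1)) // [1 22 333 4444] (all matches)
	fmt.Println(re.FindAllString(s, 2))  // [1 22]          (at most 2)

	// The Index variants report byte-offset pairs instead of the matched text.
	fmt.Println(re.FindAllStringIndex(s, 2)) // [[1 2] [4 6]]
}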
-func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte { - if n < 0 { - n = len(b) + 1 - } - var result [][][]byte - re.allMatches("", b, n, func(match []int) { - if result == nil { - result = make([][][]byte, 0, startSize) - } - slice := make([][]byte, len(match)/2) - for j := range slice { - if match[2*j] >= 0 { - slice[j] = b[match[2*j]:match[2*j+1]:match[2*j+1]] - } - } - result = append(result, slice) - }) - return result -} - -// FindAllSubmatchIndex is the 'All' version of [Regexp.FindSubmatchIndex]; it returns -// a slice of all successive matches of the expression, as defined by the -// 'All' description in the package comment. -// A return value of nil indicates no match. -func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int { - if n < 0 { - n = len(b) + 1 - } - var result [][]int - re.allMatches("", b, n, func(match []int) { - if result == nil { - result = make([][]int, 0, startSize) - } - result = append(result, match) - }) - return result -} - -// FindAllStringSubmatch is the 'All' version of [Regexp.FindStringSubmatch]; it -// returns a slice of all successive matches of the expression, as defined by -// the 'All' description in the package comment. -// A return value of nil indicates no match. -func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string { - if n < 0 { - n = len(s) + 1 - } - var result [][]string - re.allMatches(s, nil, n, func(match []int) { - if result == nil { - result = make([][]string, 0, startSize) - } - slice := make([]string, len(match)/2) - for j := range slice { - if match[2*j] >= 0 { - slice[j] = s[match[2*j]:match[2*j+1]] - } - } - result = append(result, slice) - }) - return result -} - -// FindAllStringSubmatchIndex is the 'All' version of -// [Regexp.FindStringSubmatchIndex]; it returns a slice of all successive matches of -// the expression, as defined by the 'All' description in the package -// comment. -// A return value of nil indicates no match. -func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { - if n < 0 { - n = len(s) + 1 - } - var result [][]int - re.allMatches(s, nil, n, func(match []int) { - if result == nil { - result = make([][]int, 0, startSize) - } - result = append(result, match) - }) - return result -} - -// Split slices s into substrings separated by the expression and returns a slice of -// the substrings between those expression matches. -// -// The slice returned by this method consists of all the substrings of s -// not contained in the slice returned by [Regexp.FindAllString]. When called on an expression -// that contains no metacharacters, it is equivalent to [strings.SplitN]. -// -// Example: -// -// s := regexp.MustCompile("a*").Split("abaabaccadaaae", 5) -// // s: ["", "b", "b", "c", "cadaaae"] -// -// The count determines the number of substrings to return: -// -// n > 0: at most n substrings; the last substring will be the unsplit remainder. 
-// n == 0: the result is nil (zero substrings) -// n < 0: all substrings -func (re *Regexp) Split(s string, n int) []string { - - if n == 0 { - return nil - } - - if len(re.expr) > 0 && len(s) == 0 { - return []string{""} - } - - matches := re.FindAllStringIndex(s, n) - strings := make([]string, 0, len(matches)) - - beg := 0 - end := 0 - for _, match := range matches { - if n > 0 && len(strings) >= n-1 { - break - } - - end = match[0] - if match[1] != 0 { - strings = append(strings, s[beg:end]) - } - beg = match[1] - } - - if end != len(s) { - strings = append(strings, s[beg:]) - } - - return strings -} - -// MarshalText implements [encoding.TextMarshaler]. The output -// matches that of calling the [Regexp.String] method. -// -// Note that the output is lossy in some cases: This method does not indicate -// POSIX regular expressions (i.e. those compiled by calling [CompilePOSIX]), or -// those for which the [Regexp.Longest] method has been called. -func (re *Regexp) MarshalText() ([]byte, error) { - return []byte(re.String()), nil -} - -// UnmarshalText implements [encoding.TextUnmarshaler] by calling -// [Compile] on the encoded value. -func (re *Regexp) UnmarshalText(text []byte) error { - newRE, err := Compile(string(text)) - if err != nil { - return err - } - *re = *newRE - return nil -} diff --git a/vendor/github.com/grafana/regexp/syntax/compile.go b/vendor/github.com/grafana/regexp/syntax/compile.go deleted file mode 100644 index c9f9fa02..00000000 --- a/vendor/github.com/grafana/regexp/syntax/compile.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -import "unicode" - -// A patchList is a list of instruction pointers that need to be filled in (patched). -// Because the pointers haven't been filled in yet, we can reuse their storage -// to hold the list. It's kind of sleazy, but works well in practice. -// See https://swtch.com/~rsc/regexp/regexp1.html for inspiration. -// -// These aren't really pointers: they're integers, so we can reinterpret them -// this way without using package unsafe. A value l.head denotes -// p.inst[l.head>>1].Out (l.head&1==0) or .Arg (l.head&1==1). -// head == 0 denotes the empty list, okay because we start every program -// with a fail instruction, so we'll never want to point at its output link. -type patchList struct { - head, tail uint32 -} - -func makePatchList(n uint32) patchList { - return patchList{n, n} -} - -func (l patchList) patch(p *Prog, val uint32) { - head := l.head - for head != 0 { - i := &p.Inst[head>>1] - if head&1 == 0 { - head = i.Out - i.Out = val - } else { - head = i.Arg - i.Arg = val - } - } -} - -func (l1 patchList) append(p *Prog, l2 patchList) patchList { - if l1.head == 0 { - return l2 - } - if l2.head == 0 { - return l1 - } - - i := &p.Inst[l1.tail>>1] - if l1.tail&1 == 0 { - i.Out = l2.head - } else { - i.Arg = l2.head - } - return patchList{l1.head, l2.tail} -} - -// A frag represents a compiled program fragment. -type frag struct { - i uint32 // index of first instruction - out patchList // where to record end instruction - nullable bool // whether fragment can match empty string -} - -type compiler struct { - p *Prog -} - -// Compile compiles the regexp into a program to be executed. -// The regexp should have been simplified already (returned from re.Simplify). 
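The Split count semantics described above (n > 0, n == 0, n < 0) in action, reusing the abaabaccadaaae example from the doc comment; a minimal sketch against the standard library regexp package, which shares this API with the vendored fork:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`a*`)
	s := "abaabaccadaaae"

	fmt.Printf("%q\n", re.Split(s, 5))  // ["" "b" "b" "c" "cadaaae"]   (n > 0: at most n pieces)
	fmt.Printf("%q\n", re.Split(s, -1)) // ["" "b" "b" "c" "c" "d" "e"] (n < 0: all pieces)
	fmt.Println(re.Split(s, 0) == nil)  // true                         (n == 0: nil)
}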
-func Compile(re *Regexp) (*Prog, error) { - var c compiler - c.init() - f := c.compile(re) - f.out.patch(c.p, c.inst(InstMatch).i) - c.p.Start = int(f.i) - return c.p, nil -} - -func (c *compiler) init() { - c.p = new(Prog) - c.p.NumCap = 2 // implicit ( and ) for whole match $0 - c.inst(InstFail) -} - -var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune} -var anyRune = []rune{0, unicode.MaxRune} - -func (c *compiler) compile(re *Regexp) frag { - switch re.Op { - case OpNoMatch: - return c.fail() - case OpEmptyMatch: - return c.nop() - case OpLiteral: - if len(re.Rune) == 0 { - return c.nop() - } - var f frag - for j := range re.Rune { - f1 := c.rune(re.Rune[j:j+1], re.Flags) - if j == 0 { - f = f1 - } else { - f = c.cat(f, f1) - } - } - return f - case OpCharClass: - return c.rune(re.Rune, re.Flags) - case OpAnyCharNotNL: - return c.rune(anyRuneNotNL, 0) - case OpAnyChar: - return c.rune(anyRune, 0) - case OpBeginLine: - return c.empty(EmptyBeginLine) - case OpEndLine: - return c.empty(EmptyEndLine) - case OpBeginText: - return c.empty(EmptyBeginText) - case OpEndText: - return c.empty(EmptyEndText) - case OpWordBoundary: - return c.empty(EmptyWordBoundary) - case OpNoWordBoundary: - return c.empty(EmptyNoWordBoundary) - case OpCapture: - bra := c.cap(uint32(re.Cap << 1)) - sub := c.compile(re.Sub[0]) - ket := c.cap(uint32(re.Cap<<1 | 1)) - return c.cat(c.cat(bra, sub), ket) - case OpStar: - return c.star(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0) - case OpPlus: - return c.plus(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0) - case OpQuest: - return c.quest(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0) - case OpConcat: - if len(re.Sub) == 0 { - return c.nop() - } - var f frag - for i, sub := range re.Sub { - if i == 0 { - f = c.compile(sub) - } else { - f = c.cat(f, c.compile(sub)) - } - } - return f - case OpAlternate: - var f frag - for _, sub := range re.Sub { - f = c.alt(f, c.compile(sub)) - } - return f - } - panic("regexp: unhandled case in compile") -} - -func (c *compiler) inst(op InstOp) frag { - // TODO: impose length limit - f := frag{i: uint32(len(c.p.Inst)), nullable: true} - c.p.Inst = append(c.p.Inst, Inst{Op: op}) - return f -} - -func (c *compiler) nop() frag { - f := c.inst(InstNop) - f.out = makePatchList(f.i << 1) - return f -} - -func (c *compiler) fail() frag { - return frag{} -} - -func (c *compiler) cap(arg uint32) frag { - f := c.inst(InstCapture) - f.out = makePatchList(f.i << 1) - c.p.Inst[f.i].Arg = arg - - if c.p.NumCap < int(arg)+1 { - c.p.NumCap = int(arg) + 1 - } - return f -} - -func (c *compiler) cat(f1, f2 frag) frag { - // concat of failure is failure - if f1.i == 0 || f2.i == 0 { - return frag{} - } - - // TODO: elide nop - - f1.out.patch(c.p, f2.i) - return frag{f1.i, f2.out, f1.nullable && f2.nullable} -} - -func (c *compiler) alt(f1, f2 frag) frag { - // alt of failure is other - if f1.i == 0 { - return f2 - } - if f2.i == 0 { - return f1 - } - - f := c.inst(InstAlt) - i := &c.p.Inst[f.i] - i.Out = f1.i - i.Arg = f2.i - f.out = f1.out.append(c.p, f2.out) - f.nullable = f1.nullable || f2.nullable - return f -} - -func (c *compiler) quest(f1 frag, nongreedy bool) frag { - f := c.inst(InstAlt) - i := &c.p.Inst[f.i] - if nongreedy { - i.Arg = f1.i - f.out = makePatchList(f.i << 1) - } else { - i.Out = f1.i - f.out = makePatchList(f.i<<1 | 1) - } - f.out = f.out.append(c.p, f1.out) - return f -} - -// loop returns the fragment for the main loop of a plus or star. -// For plus, it can be used after changing the entry to f1.i. 
-// For star, it can be used directly when f1 can't match an empty string. -// (When f1 can match an empty string, f1* must be implemented as (f1+)? -// to get the priority match order correct.) -func (c *compiler) loop(f1 frag, nongreedy bool) frag { - f := c.inst(InstAlt) - i := &c.p.Inst[f.i] - if nongreedy { - i.Arg = f1.i - f.out = makePatchList(f.i << 1) - } else { - i.Out = f1.i - f.out = makePatchList(f.i<<1 | 1) - } - f1.out.patch(c.p, f.i) - return f -} - -func (c *compiler) star(f1 frag, nongreedy bool) frag { - if f1.nullable { - // Use (f1+)? to get priority match order correct. - // See golang.org/issue/46123. - return c.quest(c.plus(f1, nongreedy), nongreedy) - } - return c.loop(f1, nongreedy) -} - -func (c *compiler) plus(f1 frag, nongreedy bool) frag { - return frag{f1.i, c.loop(f1, nongreedy).out, f1.nullable} -} - -func (c *compiler) empty(op EmptyOp) frag { - f := c.inst(InstEmptyWidth) - c.p.Inst[f.i].Arg = uint32(op) - f.out = makePatchList(f.i << 1) - return f -} - -func (c *compiler) rune(r []rune, flags Flags) frag { - f := c.inst(InstRune) - f.nullable = false - i := &c.p.Inst[f.i] - i.Rune = r - flags &= FoldCase // only relevant flag is FoldCase - if len(r) != 1 || unicode.SimpleFold(r[0]) == r[0] { - // and sometimes not even that - flags &^= FoldCase - } - i.Arg = uint32(flags) - f.out = makePatchList(f.i << 1) - - // Special cases for exec machine. - switch { - case flags&FoldCase == 0 && (len(r) == 1 || len(r) == 2 && r[0] == r[1]): - i.Op = InstRune1 - case len(r) == 2 && r[0] == 0 && r[1] == unicode.MaxRune: - i.Op = InstRuneAny - case len(r) == 4 && r[0] == 0 && r[1] == '\n'-1 && r[2] == '\n'+1 && r[3] == unicode.MaxRune: - i.Op = InstRuneAnyNotNL - } - - return f -} diff --git a/vendor/github.com/grafana/regexp/syntax/doc.go b/vendor/github.com/grafana/regexp/syntax/doc.go deleted file mode 100644 index 877f1043..00000000 --- a/vendor/github.com/grafana/regexp/syntax/doc.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by mksyntaxgo from the RE2 distribution. DO NOT EDIT. - -/* -Package syntax parses regular expressions into parse trees and compiles -parse trees into programs. Most clients of regular expressions will use the -facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package. - -# Syntax - -The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows. -Parts of the syntax can be disabled by passing alternate flags to [Parse]. - -Single characters: - - . any character, possibly including newline (flag s=true) - [xyz] character class - [^xyz] negated character class - \d Perl character class - \D negated Perl character class - [[:alpha:]] ASCII character class - [[:^alpha:]] negated ASCII character class - \pN Unicode character class (one-letter name) - \p{Greek} Unicode character class - \PN negated Unicode character class (one-letter name) - \P{Greek} negated Unicode character class - -Composites: - - xy x followed by y - x|y x or y (prefer x) - -Repetitions: - - x* zero or more x, prefer more - x+ one or more x, prefer more - x? zero or one x, prefer one - x{n,m} n or n+1 or ... or m x, prefer more - x{n,} n or more x, prefer more - x{n} exactly n x - x*? zero or more x, prefer fewer - x+? one or more x, prefer fewer - x?? zero or one x, prefer zero - x{n,m}? n or n+1 or ... 
or m x, prefer fewer - x{n,}? n or more x, prefer fewer - x{n}? exactly n x - -Implementation restriction: The counting forms x{n,m}, x{n,}, and x{n} -reject forms that create a minimum or maximum repetition count above 1000. -Unlimited repetitions are not subject to this restriction. - -Grouping: - - (re) numbered capturing group (submatch) - (?Pre) named & numbered capturing group (submatch) - (?re) named & numbered capturing group (submatch) - (?:re) non-capturing group - (?flags) set flags within current group; non-capturing - (?flags:re) set flags during re; non-capturing - - Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). The flags are: - - i case-insensitive (default false) - m multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false) - s let . match \n (default false) - U ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false) - -Empty strings: - - ^ at beginning of text or line (flag m=true) - $ at end of text (like \z not \Z) or line (flag m=true) - \A at beginning of text - \b at ASCII word boundary (\w on one side and \W, \A, or \z on the other) - \B not at ASCII word boundary - \z at end of text - -Escape sequences: - - \a bell (== \007) - \f form feed (== \014) - \t horizontal tab (== \011) - \n newline (== \012) - \r carriage return (== \015) - \v vertical tab character (== \013) - \* literal *, for any punctuation character * - \123 octal character code (up to three digits) - \x7F hex character code (exactly two digits) - \x{10FFFF} hex character code - \Q...\E literal text ... even if ... has punctuation - -Character class elements: - - x single character - A-Z character range (inclusive) - \d Perl character class - [:foo:] ASCII character class foo - \p{Foo} Unicode character class Foo - \pF Unicode character class F (one-letter name) - -Named character classes as character class elements: - - [\d] digits (== \d) - [^\d] not digits (== \D) - [\D] not digits (== \D) - [^\D] not not digits (== \d) - [[:name:]] named ASCII class inside character class (== [:name:]) - [^[:name:]] named ASCII class inside negated character class (== [:^name:]) - [\p{Name}] named Unicode property inside character class (== \p{Name}) - [^\p{Name}] named Unicode property inside negated character class (== \P{Name}) - -Perl character classes (all ASCII-only): - - \d digits (== [0-9]) - \D not digits (== [^0-9]) - \s whitespace (== [\t\n\f\r ]) - \S not whitespace (== [^\t\n\f\r ]) - \w word characters (== [0-9A-Za-z_]) - \W not word characters (== [^0-9A-Za-z_]) - -ASCII character classes: - - [[:alnum:]] alphanumeric (== [0-9A-Za-z]) - [[:alpha:]] alphabetic (== [A-Za-z]) - [[:ascii:]] ASCII (== [\x00-\x7F]) - [[:blank:]] blank (== [\t ]) - [[:cntrl:]] control (== [\x00-\x1F\x7F]) - [[:digit:]] digits (== [0-9]) - [[:graph:]] graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~]) - [[:lower:]] lower case (== [a-z]) - [[:print:]] printable (== [ -~] == [ [:graph:]]) - [[:punct:]] punctuation (== [!-/:-@[-`{-~]) - [[:space:]] whitespace (== [\t\n\v\f\r ]) - [[:upper:]] upper case (== [A-Z]) - [[:word:]] word characters (== [0-9A-Za-z_]) - [[:xdigit:]] hex digit (== [0-9A-Fa-f]) - -Unicode character classes are those in [unicode.Categories] and [unicode.Scripts]. 
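To see how the pieces of this syntax package fit together (Parse with the flag set documented above, Simplify, then the Compile entry point that produces the program the matching engines execute), here is a minimal sketch using the standard library regexp/syntax package, which the vendored github.com/grafana/regexp/syntax mirrors:

package main

import (
	"fmt"
	"regexp/syntax"
)

func main() {
	re, err := syntax.Parse(`a(b|c)*d`, syntax.Perl)
	if err != nil {
		panic(err)
	}
	re = re.Simplify() // Compile expects a simplified tree
	prog, err := syntax.Compile(re)
	if err != nil {
		panic(err)
	}
	fmt.Println(re)   // the parse tree rendered back as a pattern
	fmt.Println(prog) // the compiled instruction list, starting at prog.Start
}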
-*/ -package syntax diff --git a/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl b/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl deleted file mode 100644 index 80a2c9ae..00000000 --- a/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/perl -# Copyright 2008 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# Modified version of RE2's make_perl_groups.pl. - -# Generate table entries giving character ranges -# for POSIX/Perl character classes. Rather than -# figure out what the definition is, it is easier to ask -# Perl about each letter from 0-128 and write down -# its answer. - -@posixclasses = ( - "[:alnum:]", - "[:alpha:]", - "[:ascii:]", - "[:blank:]", - "[:cntrl:]", - "[:digit:]", - "[:graph:]", - "[:lower:]", - "[:print:]", - "[:punct:]", - "[:space:]", - "[:upper:]", - "[:word:]", - "[:xdigit:]", -); - -@perlclasses = ( - "\\d", - "\\s", - "\\w", -); - -%overrides = ( - # Prior to Perl 5.18, \s did not match vertical tab. - # RE2 preserves that original behaviour. - "\\s:11" => 0, -); - -sub ComputeClass($) { - my @ranges; - my ($class) = @_; - my $regexp = "[$class]"; - my $start = -1; - for (my $i=0; $i<=129; $i++) { - if ($i == 129) { $i = 256; } - if ($i <= 128 && ($overrides{"$class:$i"} // chr($i) =~ $regexp)) { - if ($start < 0) { - $start = $i; - } - } else { - if ($start >= 0) { - push @ranges, [$start, $i-1]; - } - $start = -1; - } - } - return @ranges; } - -sub PrintClass($$@) { - my ($cname, $name, @ranges) = @_; - print "var code$cname = []rune{ /* $name */\n"; - for (my $i=0; $i<@ranges; $i++) { - my @a = @{$ranges[$i]}; - printf "\t0x%x, 0x%x,\n", $a[0], $a[1]; - } - print "}\n\n"; - my $n = @ranges; - $negname = $name; - if ($negname =~ /:/) { - $negname =~ s/:/:^/; - } else { - $negname =~ y/a-z/A-Z/; - } - return "\t`$name`: {+1, code$cname},\n" . - "\t`$negname`: {-1, code$cname},\n"; -} - -my $gen = 0; - -sub PrintClasses($@) { - my ($cname, @classes) = @_; - my @entries; - foreach my $cl (@classes) { - my @ranges = ComputeClass($cl); - push @entries, PrintClass(++$gen, $cl, @ranges); - } - print "var ${cname}Group = map[string]charGroup{\n"; - foreach my $e (@entries) { - print $e; - } - print "}\n"; - my $count = @entries; -} - -print <<EOF; -// Code generated by make_perl_groups.pl; DO NOT EDIT. -// make_perl_groups.pl >perl_groups.go - -package syntax - -EOF - -PrintClasses("perl", @perlclasses); -PrintClasses("posix", @posixclasses); diff --git a/vendor/github.com/grafana/regexp/syntax/op_string.go b/vendor/github.com/grafana/regexp/syntax/op_string.go deleted file mode 100644 index 1368f5b7..00000000 --- a/vendor/github.com/grafana/regexp/syntax/op_string.go +++ /dev/null @@ -1,52 +0,0 @@ -// Code generated by "stringer -type Op -trimprefix Op"; DO NOT EDIT. - -package syntax - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again.
- var x [1]struct{} - _ = x[OpNoMatch-1] - _ = x[OpEmptyMatch-2] - _ = x[OpLiteral-3] - _ = x[OpCharClass-4] - _ = x[OpAnyCharNotNL-5] - _ = x[OpAnyChar-6] - _ = x[OpBeginLine-7] - _ = x[OpEndLine-8] - _ = x[OpBeginText-9] - _ = x[OpEndText-10] - _ = x[OpWordBoundary-11] - _ = x[OpNoWordBoundary-12] - _ = x[OpCapture-13] - _ = x[OpStar-14] - _ = x[OpPlus-15] - _ = x[OpQuest-16] - _ = x[OpRepeat-17] - _ = x[OpConcat-18] - _ = x[OpAlternate-19] - _ = x[opPseudo-128] -} - -const ( - _Op_name_0 = "NoMatchEmptyMatchLiteralCharClassAnyCharNotNLAnyCharBeginLineEndLineBeginTextEndTextWordBoundaryNoWordBoundaryCaptureStarPlusQuestRepeatConcatAlternate" - _Op_name_1 = "opPseudo" -) - -var ( - _Op_index_0 = [...]uint8{0, 7, 17, 24, 33, 45, 52, 61, 68, 77, 84, 96, 110, 117, 121, 125, 130, 136, 142, 151} -) - -func (i Op) String() string { - switch { - case 1 <= i && i <= 19: - i -= 1 - return _Op_name_0[_Op_index_0[i]:_Op_index_0[i+1]] - case i == 128: - return _Op_name_1 - default: - return "Op(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/grafana/regexp/syntax/parse.go b/vendor/github.com/grafana/regexp/syntax/parse.go deleted file mode 100644 index 6ed6491c..00000000 --- a/vendor/github.com/grafana/regexp/syntax/parse.go +++ /dev/null @@ -1,2136 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -import ( - "sort" - "strings" - "unicode" - "unicode/utf8" -) - -// An Error describes a failure to parse a regular expression -// and gives the offending expression. -type Error struct { - Code ErrorCode - Expr string -} - -func (e *Error) Error() string { - return "error parsing regexp: " + e.Code.String() + ": `" + e.Expr + "`" -} - -// An ErrorCode describes a failure to parse a regular expression. -type ErrorCode string - -const ( - // Unexpected error - ErrInternalError ErrorCode = "regexp/syntax: internal error" - - // Parse errors - ErrInvalidCharClass ErrorCode = "invalid character class" - ErrInvalidCharRange ErrorCode = "invalid character class range" - ErrInvalidEscape ErrorCode = "invalid escape sequence" - ErrInvalidNamedCapture ErrorCode = "invalid named capture" - ErrInvalidPerlOp ErrorCode = "invalid or unsupported Perl syntax" - ErrInvalidRepeatOp ErrorCode = "invalid nested repetition operator" - ErrInvalidRepeatSize ErrorCode = "invalid repeat count" - ErrInvalidUTF8 ErrorCode = "invalid UTF-8" - ErrMissingBracket ErrorCode = "missing closing ]" - ErrMissingParen ErrorCode = "missing closing )" - ErrMissingRepeatArgument ErrorCode = "missing argument to repetition operator" - ErrTrailingBackslash ErrorCode = "trailing backslash at end of expression" - ErrUnexpectedParen ErrorCode = "unexpected )" - ErrNestingDepth ErrorCode = "expression nests too deeply" - ErrLarge ErrorCode = "expression too large" -) - -func (e ErrorCode) String() string { - return string(e) -} - -// Flags control the behavior of the parser and record information about regexp context. -type Flags uint16 - -const ( - FoldCase Flags = 1 << iota // case-insensitive match - Literal // treat pattern as literal string - ClassNL // allow character classes like [^a-z] and [[:space:]] to match newline - DotNL // allow . 
to match newline - OneLine // treat ^ and $ as only matching at beginning and end of text - NonGreedy // make repetition operators default to non-greedy - PerlX // allow Perl extensions - UnicodeGroups // allow \p{Han}, \P{Han} for Unicode group and negation - WasDollar // regexp OpEndText was $, not \z - Simple // regexp contains no counted repetition - - MatchNL = ClassNL | DotNL - - Perl = ClassNL | OneLine | PerlX | UnicodeGroups // as close to Perl as possible - POSIX Flags = 0 // POSIX syntax -) - -// Pseudo-ops for parsing stack. -const ( - opLeftParen = opPseudo + iota - opVerticalBar -) - -// maxHeight is the maximum height of a regexp parse tree. -// It is somewhat arbitrarily chosen, but the idea is to be large enough -// that no one will actually hit in real use but at the same time small enough -// that recursion on the Regexp tree will not hit the 1GB Go stack limit. -// The maximum amount of stack for a single recursive frame is probably -// closer to 1kB, so this could potentially be raised, but it seems unlikely -// that people have regexps nested even this deeply. -// We ran a test on Google's C++ code base and turned up only -// a single use case with depth > 100; it had depth 128. -// Using depth 1000 should be plenty of margin. -// As an optimization, we don't even bother calculating heights -// until we've allocated at least maxHeight Regexp structures. -const maxHeight = 1000 - -// maxSize is the maximum size of a compiled regexp in Insts. -// It too is somewhat arbitrarily chosen, but the idea is to be large enough -// to allow significant regexps while at the same time small enough that -// the compiled form will not take up too much memory. -// 128 MB is enough for a 3.3 million Inst structures, which roughly -// corresponds to a 3.3 MB regexp. -const ( - maxSize = 128 << 20 / instSize - instSize = 5 * 8 // byte, 2 uint32, slice is 5 64-bit words -) - -// maxRunes is the maximum number of runes allowed in a regexp tree -// counting the runes in all the nodes. -// Ignoring character classes p.numRunes is always less than the length of the regexp. -// Character classes can make it much larger: each \pL adds 1292 runes. -// 128 MB is enough for 32M runes, which is over 26k \pL instances. -// Note that repetitions do not make copies of the rune slices, -// so \pL{1000} is only one rune slice, not 1000. -// We could keep a cache of character classes we've seen, -// so that all the \pL we see use the same rune list, -// but that doesn't remove the problem entirely: -// consider something like [\pL01234][\pL01235][\pL01236]...[\pL^&*()]. -// And because the Rune slice is exposed directly in the Regexp, -// there is not an opportunity to change the representation to allow -// partial sharing between different character classes. -// So the limit is the best we can do. 
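The size and nesting limits described above are enforced at parse time rather than at match time. A sketch of what that looks like from the outside, using the standard library regexp package (which carries the same limits as this fork); the exact threshold and error wording depend on the version, so this is illustrative only:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Roughly 1000 nested capture groups exceeds the maxHeight limit above.
	pat := strings.Repeat("(", 1001) + "a" + strings.Repeat(")", 1001)
	_, err := regexp.Compile(pat)
	fmt.Println(err != nil) // true: rejected at parse time ("expression nests too deeply")
}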
-const ( - maxRunes = 128 << 20 / runeSize - runeSize = 4 // rune is int32 -) - -type parser struct { - flags Flags // parse mode flags - stack []*Regexp // stack of parsed expressions - free *Regexp - numCap int // number of capturing groups seen - wholeRegexp string - tmpClass []rune // temporary char class work space - numRegexp int // number of regexps allocated - numRunes int // number of runes in char classes - repeats int64 // product of all repetitions seen - height map[*Regexp]int // regexp height, for height limit check - size map[*Regexp]int64 // regexp compiled size, for size limit check -} - -func (p *parser) newRegexp(op Op) *Regexp { - re := p.free - if re != nil { - p.free = re.Sub0[0] - *re = Regexp{} - } else { - re = new(Regexp) - p.numRegexp++ - } - re.Op = op - return re -} - -func (p *parser) reuse(re *Regexp) { - if p.height != nil { - delete(p.height, re) - } - re.Sub0[0] = p.free - p.free = re -} - -func (p *parser) checkLimits(re *Regexp) { - if p.numRunes > maxRunes { - panic(ErrLarge) - } - p.checkSize(re) - p.checkHeight(re) -} - -func (p *parser) checkSize(re *Regexp) { - if p.size == nil { - // We haven't started tracking size yet. - // Do a relatively cheap check to see if we need to start. - // Maintain the product of all the repeats we've seen - // and don't track if the total number of regexp nodes - // we've seen times the repeat product is in budget. - if p.repeats == 0 { - p.repeats = 1 - } - if re.Op == OpRepeat { - n := re.Max - if n == -1 { - n = re.Min - } - if n <= 0 { - n = 1 - } - if int64(n) > maxSize/p.repeats { - p.repeats = maxSize - } else { - p.repeats *= int64(n) - } - } - if int64(p.numRegexp) < maxSize/p.repeats { - return - } - - // We need to start tracking size. - // Make the map and belatedly populate it - // with info about everything we've constructed so far. - p.size = make(map[*Regexp]int64) - for _, re := range p.stack { - p.checkSize(re) - } - } - - if p.calcSize(re, true) > maxSize { - panic(ErrLarge) - } -} - -func (p *parser) calcSize(re *Regexp, force bool) int64 { - if !force { - if size, ok := p.size[re]; ok { - return size - } - } - - var size int64 - switch re.Op { - case OpLiteral: - size = int64(len(re.Rune)) - case OpCapture, OpStar: - // star can be 1+ or 2+; assume 2 pessimistically - size = 2 + p.calcSize(re.Sub[0], false) - case OpPlus, OpQuest: - size = 1 + p.calcSize(re.Sub[0], false) - case OpConcat: - for _, sub := range re.Sub { - size += p.calcSize(sub, false) - } - case OpAlternate: - for _, sub := range re.Sub { - size += p.calcSize(sub, false) - } - if len(re.Sub) > 1 { - size += int64(len(re.Sub)) - 1 - } - case OpRepeat: - sub := p.calcSize(re.Sub[0], false) - if re.Max == -1 { - if re.Min == 0 { - size = 2 + sub // x* - } else { - size = 1 + int64(re.Min)*sub // xxx+ - } - break - } - // x{2,5} = xx(x(x(x)?)?)? 
- size = int64(re.Max)*sub + int64(re.Max-re.Min) - } - - size = max(1, size) - p.size[re] = size - return size -} - -func (p *parser) checkHeight(re *Regexp) { - if p.numRegexp < maxHeight { - return - } - if p.height == nil { - p.height = make(map[*Regexp]int) - for _, re := range p.stack { - p.checkHeight(re) - } - } - if p.calcHeight(re, true) > maxHeight { - panic(ErrNestingDepth) - } -} - -func (p *parser) calcHeight(re *Regexp, force bool) int { - if !force { - if h, ok := p.height[re]; ok { - return h - } - } - h := 1 - for _, sub := range re.Sub { - hsub := p.calcHeight(sub, false) - if h < 1+hsub { - h = 1 + hsub - } - } - p.height[re] = h - return h -} - -// Parse stack manipulation. - -// push pushes the regexp re onto the parse stack and returns the regexp. -func (p *parser) push(re *Regexp) *Regexp { - p.numRunes += len(re.Rune) - if re.Op == OpCharClass && len(re.Rune) == 2 && re.Rune[0] == re.Rune[1] { - // Single rune. - if p.maybeConcat(re.Rune[0], p.flags&^FoldCase) { - return nil - } - re.Op = OpLiteral - re.Rune = re.Rune[:1] - re.Flags = p.flags &^ FoldCase - } else if re.Op == OpCharClass && len(re.Rune) == 4 && - re.Rune[0] == re.Rune[1] && re.Rune[2] == re.Rune[3] && - unicode.SimpleFold(re.Rune[0]) == re.Rune[2] && - unicode.SimpleFold(re.Rune[2]) == re.Rune[0] || - re.Op == OpCharClass && len(re.Rune) == 2 && - re.Rune[0]+1 == re.Rune[1] && - unicode.SimpleFold(re.Rune[0]) == re.Rune[1] && - unicode.SimpleFold(re.Rune[1]) == re.Rune[0] { - // Case-insensitive rune like [Aa] or [Δδ]. - if p.maybeConcat(re.Rune[0], p.flags|FoldCase) { - return nil - } - - // Rewrite as (case-insensitive) literal. - re.Op = OpLiteral - re.Rune = re.Rune[:1] - re.Flags = p.flags | FoldCase - } else { - // Incremental concatenation. - p.maybeConcat(-1, 0) - } - - p.stack = append(p.stack, re) - p.checkLimits(re) - return re -} - -// maybeConcat implements incremental concatenation -// of literal runes into string nodes. The parser calls this -// before each push, so only the top fragment of the stack -// might need processing. Since this is called before a push, -// the topmost literal is no longer subject to operators like * -// (Otherwise ab* would turn into (ab)*.) -// If r >= 0 and there's a node left over, maybeConcat uses it -// to push r with the given flags. -// maybeConcat reports whether r was pushed. -func (p *parser) maybeConcat(r rune, flags Flags) bool { - n := len(p.stack) - if n < 2 { - return false - } - - re1 := p.stack[n-1] - re2 := p.stack[n-2] - if re1.Op != OpLiteral || re2.Op != OpLiteral || re1.Flags&FoldCase != re2.Flags&FoldCase { - return false - } - - // Push re1 into re2. - re2.Rune = append(re2.Rune, re1.Rune...) - - // Reuse re1 if possible. - if r >= 0 { - re1.Rune = re1.Rune0[:1] - re1.Rune[0] = r - re1.Flags = flags - return true - } - - p.stack = p.stack[:n-1] - p.reuse(re1) - return false // did not push r -} - -// literal pushes a literal regexp for the rune r on the stack. -func (p *parser) literal(r rune) { - re := p.newRegexp(OpLiteral) - re.Flags = p.flags - if p.flags&FoldCase != 0 { - r = minFoldRune(r) - } - re.Rune0[0] = r - re.Rune = re.Rune0[:1] - p.push(re) -} - -// minFoldRune returns the minimum rune fold-equivalent to r. -func minFoldRune(r rune) rune { - if r < minFold || r > maxFold { - return r - } - m := r - r0 := r - for r = unicode.SimpleFold(r); r != r0; r = unicode.SimpleFold(r) { - m = min(m, r) - } - return m -} - -// op pushes a regexp with the given op onto the stack -// and returns that regexp. 
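The incremental concatenation performed by push and maybeConcat above means a plain string parses to a single OpLiteral node rather than a concatenation of one-rune literals. A minimal check via the standard library regexp/syntax package (mirrored by this fork):

package main

import (
	"fmt"
	"regexp/syntax"
)

func main() {
	// Adjacent literal runes are folded into one literal node.
	re, _ := syntax.Parse("abc", syntax.Perl)
	fmt.Println(re.Op == syntax.OpLiteral, string(re.Rune)) // true abc
}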
-func (p *parser) op(op Op) *Regexp { - re := p.newRegexp(op) - re.Flags = p.flags - return p.push(re) -} - -// repeat replaces the top stack element with itself repeated according to op, min, max. -// before is the regexp suffix starting at the repetition operator. -// after is the regexp suffix following after the repetition operator. -// repeat returns an updated 'after' and an error, if any. -func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) (string, error) { - flags := p.flags - if p.flags&PerlX != 0 { - if len(after) > 0 && after[0] == '?' { - after = after[1:] - flags ^= NonGreedy - } - if lastRepeat != "" { - // In Perl it is not allowed to stack repetition operators: - // a** is a syntax error, not a doubled star, and a++ means - // something else entirely, which we don't support! - return "", &Error{ErrInvalidRepeatOp, lastRepeat[:len(lastRepeat)-len(after)]} - } - } - n := len(p.stack) - if n == 0 { - return "", &Error{ErrMissingRepeatArgument, before[:len(before)-len(after)]} - } - sub := p.stack[n-1] - if sub.Op >= opPseudo { - return "", &Error{ErrMissingRepeatArgument, before[:len(before)-len(after)]} - } - - re := p.newRegexp(op) - re.Min = min - re.Max = max - re.Flags = flags - re.Sub = re.Sub0[:1] - re.Sub[0] = sub - p.stack[n-1] = re - p.checkLimits(re) - - if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) { - return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]} - } - - return after, nil -} - -// repeatIsValid reports whether the repetition re is valid. -// Valid means that the combination of the top-level repetition -// and any inner repetitions does not exceed n copies of the -// innermost thing. -// This function rewalks the regexp tree and is called for every repetition, -// so we have to worry about inducing quadratic behavior in the parser. -// We avoid this by only calling repeatIsValid when min or max >= 2. -// In that case the depth of any >= 2 nesting can only get to 9 without -// triggering a parse error, so each subtree can only be rewalked 9 times. -func repeatIsValid(re *Regexp, n int) bool { - if re.Op == OpRepeat { - m := re.Max - if m == 0 { - return true - } - if m < 0 { - m = re.Min - } - if m > n { - return false - } - if m > 0 { - n /= m - } - } - for _, sub := range re.Sub { - if !repeatIsValid(sub, n) { - return false - } - } - return true -} - -// concat replaces the top of the stack (above the topmost '|' or '(') with its concatenation. -func (p *parser) concat() *Regexp { - p.maybeConcat(-1, 0) - - // Scan down to find pseudo-operator | or (. - i := len(p.stack) - for i > 0 && p.stack[i-1].Op < opPseudo { - i-- - } - subs := p.stack[i:] - p.stack = p.stack[:i] - - // Empty concatenation is special case. - if len(subs) == 0 { - return p.push(p.newRegexp(OpEmptyMatch)) - } - - return p.push(p.collapse(subs, OpConcat)) -} - -// alternate replaces the top of the stack (above the topmost '(') with its alternation. -func (p *parser) alternate() *Regexp { - // Scan down to find pseudo-operator (. - // There are no | above (. - i := len(p.stack) - for i > 0 && p.stack[i-1].Op < opPseudo { - i-- - } - subs := p.stack[i:] - p.stack = p.stack[:i] - - // Make sure top class is clean. - // All the others already are (see swapVerticalBar). - if len(subs) > 0 { - cleanAlt(subs[len(subs)-1]) - } - - // Empty alternate is special case - // (shouldn't happen but easy to handle). 
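repeat and repeatIsValid above enforce the repetition limits mentioned in the package documentation: a single counted repetition above 1000 is rejected, and so are nested counted repetitions whose combined count exceeds the cap. A small sketch with the standard library regexp (the same behaviour is expected from the fork):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	_, err := regexp.Compile(`a{1001}`)
	fmt.Println(err) // error parsing regexp: invalid repeat count: `{1001}`

	_, err = regexp.Compile(`(a{100}){100}`)
	fmt.Println(err) // also rejected: the effective count is 100*100
}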
- if len(subs) == 0 { - return p.push(p.newRegexp(OpNoMatch)) - } - - return p.push(p.collapse(subs, OpAlternate)) -} - -// cleanAlt cleans re for eventual inclusion in an alternation. -func cleanAlt(re *Regexp) { - switch re.Op { - case OpCharClass: - re.Rune = cleanClass(&re.Rune) - if len(re.Rune) == 2 && re.Rune[0] == 0 && re.Rune[1] == unicode.MaxRune { - re.Rune = nil - re.Op = OpAnyChar - return - } - if len(re.Rune) == 4 && re.Rune[0] == 0 && re.Rune[1] == '\n'-1 && re.Rune[2] == '\n'+1 && re.Rune[3] == unicode.MaxRune { - re.Rune = nil - re.Op = OpAnyCharNotNL - return - } - if cap(re.Rune)-len(re.Rune) > 100 { - // re.Rune will not grow any more. - // Make a copy or inline to reclaim storage. - re.Rune = append(re.Rune0[:0], re.Rune...) - } - } -} - -// collapse returns the result of applying op to sub. -// If sub contains op nodes, they all get hoisted up -// so that there is never a concat of a concat or an -// alternate of an alternate. -func (p *parser) collapse(subs []*Regexp, op Op) *Regexp { - if len(subs) == 1 { - return subs[0] - } - re := p.newRegexp(op) - re.Sub = re.Sub0[:0] - for _, sub := range subs { - if sub.Op == op { - re.Sub = append(re.Sub, sub.Sub...) - p.reuse(sub) - } else { - re.Sub = append(re.Sub, sub) - } - } - if op == OpAlternate { - re.Sub = p.factor(re.Sub) - if len(re.Sub) == 1 { - old := re - re = re.Sub[0] - p.reuse(old) - } - } - return re -} - -// factor factors common prefixes from the alternation list sub. -// It returns a replacement list that reuses the same storage and -// frees (passes to p.reuse) any removed *Regexps. -// -// For example, -// -// ABC|ABD|AEF|BCX|BCY -// -// simplifies by literal prefix extraction to -// -// A(B(C|D)|EF)|BC(X|Y) -// -// which simplifies by character class introduction to -// -// A(B[CD]|EF)|BC[XY] -func (p *parser) factor(sub []*Regexp) []*Regexp { - if len(sub) < 2 { - return sub - } - - // Round 1: Factor out common literal prefixes. - var str []rune - var strflags Flags - start := 0 - out := sub[:0] - for i := 0; i <= len(sub); i++ { - // Invariant: the Regexps that were in sub[0:start] have been - // used or marked for reuse, and the slice space has been reused - // for out (len(out) <= start). - // - // Invariant: sub[start:i] consists of regexps that all begin - // with str as modified by strflags. - var istr []rune - var iflags Flags - if i < len(sub) { - istr, iflags = p.leadingString(sub[i]) - if iflags == strflags { - same := 0 - for same < len(str) && same < len(istr) && str[same] == istr[same] { - same++ - } - if same > 0 { - // Matches at least one rune in current range. - // Keep going around. - str = str[:same] - continue - } - } - } - - // Found end of a run with common leading literal string: - // sub[start:i] all begin with str[0:len(str)], but sub[i] - // does not even begin with str[0]. - // - // Factor out common string and append factored expression to out. - if i == start { - // Nothing to do - run of length 0. - } else if i == start+1 { - // Just one: don't bother factoring. - out = append(out, sub[start]) - } else { - // Construct factored form: prefix(suffix1|suffix2|...) - prefix := p.newRegexp(OpLiteral) - prefix.Flags = strflags - prefix.Rune = append(prefix.Rune[:0], str...) 
- - for j := start; j < i; j++ { - sub[j] = p.removeLeadingString(sub[j], len(str)) - p.checkLimits(sub[j]) - } - suffix := p.collapse(sub[start:i], OpAlternate) // recurse - - re := p.newRegexp(OpConcat) - re.Sub = append(re.Sub[:0], prefix, suffix) - out = append(out, re) - } - - // Prepare for next iteration. - start = i - str = istr - strflags = iflags - } - sub = out - - // Round 2: Factor out common simple prefixes, - // just the first piece of each concatenation. - // This will be good enough a lot of the time. - // - // Complex subexpressions (e.g. involving quantifiers) - // are not safe to factor because that collapses their - // distinct paths through the automaton, which affects - // correctness in some cases. - start = 0 - out = sub[:0] - var first *Regexp - for i := 0; i <= len(sub); i++ { - // Invariant: the Regexps that were in sub[0:start] have been - // used or marked for reuse, and the slice space has been reused - // for out (len(out) <= start). - // - // Invariant: sub[start:i] consists of regexps that all begin with ifirst. - var ifirst *Regexp - if i < len(sub) { - ifirst = p.leadingRegexp(sub[i]) - if first != nil && first.Equal(ifirst) && - // first must be a character class OR a fixed repeat of a character class. - (isCharClass(first) || (first.Op == OpRepeat && first.Min == first.Max && isCharClass(first.Sub[0]))) { - continue - } - } - - // Found end of a run with common leading regexp: - // sub[start:i] all begin with first but sub[i] does not. - // - // Factor out common regexp and append factored expression to out. - if i == start { - // Nothing to do - run of length 0. - } else if i == start+1 { - // Just one: don't bother factoring. - out = append(out, sub[start]) - } else { - // Construct factored form: prefix(suffix1|suffix2|...) - prefix := first - for j := start; j < i; j++ { - reuse := j != start // prefix came from sub[start] - sub[j] = p.removeLeadingRegexp(sub[j], reuse) - p.checkLimits(sub[j]) - } - suffix := p.collapse(sub[start:i], OpAlternate) // recurse - - re := p.newRegexp(OpConcat) - re.Sub = append(re.Sub[:0], prefix, suffix) - out = append(out, re) - } - - // Prepare for next iteration. - start = i - first = ifirst - } - sub = out - - // Round 3: Collapse runs of single literals into character classes. - start = 0 - out = sub[:0] - for i := 0; i <= len(sub); i++ { - // Invariant: the Regexps that were in sub[0:start] have been - // used or marked for reuse, and the slice space has been reused - // for out (len(out) <= start). - // - // Invariant: sub[start:i] consists of regexps that are either - // literal runes or character classes. - if i < len(sub) && isCharClass(sub[i]) { - continue - } - - // sub[i] is not a char or char class; - // emit char class for sub[start:i]... - if i == start { - // Nothing to do - run of length 0. - } else if i == start+1 { - out = append(out, sub[start]) - } else { - // Make new char class. - // Start with most complex regexp in sub[start]. - max := start - for j := start + 1; j < i; j++ { - if sub[max].Op < sub[j].Op || sub[max].Op == sub[j].Op && len(sub[max].Rune) < len(sub[j].Rune) { - max = j - } - } - sub[start], sub[max] = sub[max], sub[start] - - for j := start + 1; j < i; j++ { - mergeCharClass(sub[start], sub[j]) - p.reuse(sub[j]) - } - cleanAlt(sub[start]) - out = append(out, sub[start]) - } - - // ... and then emit sub[i]. - if i < len(sub) { - out = append(out, sub[i]) - } - start = i + 1 - } - sub = out - - // Round 4: Collapse runs of empty matches into a single empty match. 
- start = 0 - out = sub[:0] - for i := range sub { - if i+1 < len(sub) && sub[i].Op == OpEmptyMatch && sub[i+1].Op == OpEmptyMatch { - continue - } - out = append(out, sub[i]) - } - sub = out - - return sub -} - -// leadingString returns the leading literal string that re begins with. -// The string refers to storage in re or its children. -func (p *parser) leadingString(re *Regexp) ([]rune, Flags) { - if re.Op == OpConcat && len(re.Sub) > 0 { - re = re.Sub[0] - } - if re.Op != OpLiteral { - return nil, 0 - } - return re.Rune, re.Flags & FoldCase -} - -// removeLeadingString removes the first n leading runes -// from the beginning of re. It returns the replacement for re. -func (p *parser) removeLeadingString(re *Regexp, n int) *Regexp { - if re.Op == OpConcat && len(re.Sub) > 0 { - // Removing a leading string in a concatenation - // might simplify the concatenation. - sub := re.Sub[0] - sub = p.removeLeadingString(sub, n) - re.Sub[0] = sub - if sub.Op == OpEmptyMatch { - p.reuse(sub) - switch len(re.Sub) { - case 0, 1: - // Impossible but handle. - re.Op = OpEmptyMatch - re.Sub = nil - case 2: - old := re - re = re.Sub[1] - p.reuse(old) - default: - copy(re.Sub, re.Sub[1:]) - re.Sub = re.Sub[:len(re.Sub)-1] - } - } - return re - } - - if re.Op == OpLiteral { - re.Rune = re.Rune[:copy(re.Rune, re.Rune[n:])] - if len(re.Rune) == 0 { - re.Op = OpEmptyMatch - } - } - return re -} - -// leadingRegexp returns the leading regexp that re begins with. -// The regexp refers to storage in re or its children. -func (p *parser) leadingRegexp(re *Regexp) *Regexp { - if re.Op == OpEmptyMatch { - return nil - } - if re.Op == OpConcat && len(re.Sub) > 0 { - sub := re.Sub[0] - if sub.Op == OpEmptyMatch { - return nil - } - return sub - } - return re -} - -// removeLeadingRegexp removes the leading regexp in re. -// It returns the replacement for re. -// If reuse is true, it passes the removed regexp (if no longer needed) to p.reuse. -func (p *parser) removeLeadingRegexp(re *Regexp, reuse bool) *Regexp { - if re.Op == OpConcat && len(re.Sub) > 0 { - if reuse { - p.reuse(re.Sub[0]) - } - re.Sub = re.Sub[:copy(re.Sub, re.Sub[1:])] - switch len(re.Sub) { - case 0: - re.Op = OpEmptyMatch - re.Sub = nil - case 1: - old := re - re = re.Sub[0] - p.reuse(old) - } - return re - } - if reuse { - p.reuse(re) - } - return p.newRegexp(OpEmptyMatch) -} - -func literalRegexp(s string, flags Flags) *Regexp { - re := &Regexp{Op: OpLiteral} - re.Flags = flags - re.Rune = re.Rune0[:0] // use local storage for small strings - for _, c := range s { - if len(re.Rune) >= cap(re.Rune) { - // string is too long to fit in Rune0. let Go handle it - re.Rune = []rune(s) - break - } - re.Rune = append(re.Rune, c) - } - return re -} - -// Parsing. - -// Parse parses a regular expression string s, controlled by the specified -// Flags, and returns a regular expression parse tree. The syntax is -// described in the top-level comment. -func Parse(s string, flags Flags) (*Regexp, error) { - return parse(s, flags) -} - -func parse(s string, flags Flags) (_ *Regexp, err error) { - defer func() { - switch r := recover(); r { - default: - panic(r) - case nil: - // ok - case ErrLarge: // too big - err = &Error{Code: ErrLarge, Expr: s} - case ErrNestingDepth: - err = &Error{Code: ErrNestingDepth, Expr: s} - } - }() - - if flags&Literal != 0 { - // Trivial parser for literal string. - if err := checkUTF8(s); err != nil { - return nil, err - } - return literalRegexp(s, flags), nil - } - - // Otherwise, must do real work. 
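The ABC|ABD|AEF|BCX|BCY example from the factor comment above can be seen directly by rendering the parse tree back to a string; the standard library regexp/syntax package, which this fork tracks, applies the same factoring during Parse:

package main

import (
	"fmt"
	"regexp/syntax"
)

func main() {
	re, _ := syntax.Parse(`ABC|ABD|AEF|BCX|BCY`, syntax.Perl)
	// Prints the factored form, along the lines of A(?:B[CD]|EF)|BC[XY];
	// the exact rendering of the character classes may vary.
	fmt.Println(re)
}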
- var ( - p parser - c rune - op Op - lastRepeat string - ) - p.flags = flags - p.wholeRegexp = s - t := s - for t != "" { - repeat := "" - BigSwitch: - switch t[0] { - default: - if c, t, err = nextRune(t); err != nil { - return nil, err - } - p.literal(c) - - case '(': - if p.flags&PerlX != 0 && len(t) >= 2 && t[1] == '?' { - // Flag changes and non-capturing groups. - if t, err = p.parsePerlFlags(t); err != nil { - return nil, err - } - break - } - p.numCap++ - p.op(opLeftParen).Cap = p.numCap - t = t[1:] - case '|': - if err = p.parseVerticalBar(); err != nil { - return nil, err - } - t = t[1:] - case ')': - if err = p.parseRightParen(); err != nil { - return nil, err - } - t = t[1:] - case '^': - if p.flags&OneLine != 0 { - p.op(OpBeginText) - } else { - p.op(OpBeginLine) - } - t = t[1:] - case '$': - if p.flags&OneLine != 0 { - p.op(OpEndText).Flags |= WasDollar - } else { - p.op(OpEndLine) - } - t = t[1:] - case '.': - if p.flags&DotNL != 0 { - p.op(OpAnyChar) - } else { - p.op(OpAnyCharNotNL) - } - t = t[1:] - case '[': - if t, err = p.parseClass(t); err != nil { - return nil, err - } - case '*', '+', '?': - before := t - switch t[0] { - case '*': - op = OpStar - case '+': - op = OpPlus - case '?': - op = OpQuest - } - after := t[1:] - if after, err = p.repeat(op, 0, 0, before, after, lastRepeat); err != nil { - return nil, err - } - repeat = before - t = after - case '{': - op = OpRepeat - before := t - min, max, after, ok := p.parseRepeat(t) - if !ok { - // If the repeat cannot be parsed, { is a literal. - p.literal('{') - t = t[1:] - break - } - if min < 0 || min > 1000 || max > 1000 || max >= 0 && min > max { - // Numbers were too big, or max is present and min > max. - return nil, &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]} - } - if after, err = p.repeat(op, min, max, before, after, lastRepeat); err != nil { - return nil, err - } - repeat = before - t = after - case '\\': - if p.flags&PerlX != 0 && len(t) >= 2 { - switch t[1] { - case 'A': - p.op(OpBeginText) - t = t[2:] - break BigSwitch - case 'b': - p.op(OpWordBoundary) - t = t[2:] - break BigSwitch - case 'B': - p.op(OpNoWordBoundary) - t = t[2:] - break BigSwitch - case 'C': - // any byte; not supported - return nil, &Error{ErrInvalidEscape, t[:2]} - case 'Q': - // \Q ... \E: the ... is always literals - var lit string - lit, t, _ = strings.Cut(t[2:], `\E`) - for lit != "" { - c, rest, err := nextRune(lit) - if err != nil { - return nil, err - } - p.literal(c) - lit = rest - } - break BigSwitch - case 'z': - p.op(OpEndText) - t = t[2:] - break BigSwitch - } - } - - re := p.newRegexp(OpCharClass) - re.Flags = p.flags - - // Look for Unicode character group like \p{Han} - if len(t) >= 2 && (t[1] == 'p' || t[1] == 'P') { - r, rest, err := p.parseUnicodeClass(t, re.Rune0[:0]) - if err != nil { - return nil, err - } - if r != nil { - re.Rune = r - t = rest - p.push(re) - break BigSwitch - } - } - - // Perl character class escape. - if r, rest := p.parsePerlClassEscape(t, re.Rune0[:0]); r != nil { - re.Rune = r - t = rest - p.push(re) - break BigSwitch - } - p.reuse(re) - - // Ordinary single-character escape. 
- if c, t, err = p.parseEscape(t); err != nil { - return nil, err - } - p.literal(c) - } - lastRepeat = repeat - } - - p.concat() - if p.swapVerticalBar() { - // pop vertical bar - p.stack = p.stack[:len(p.stack)-1] - } - p.alternate() - - n := len(p.stack) - if n != 1 { - return nil, &Error{ErrMissingParen, s} - } - return p.stack[0], nil -} - -// parseRepeat parses {min} (max=min) or {min,} (max=-1) or {min,max}. -// If s is not of that form, it returns ok == false. -// If s has the right form but the values are too big, it returns min == -1, ok == true. -func (p *parser) parseRepeat(s string) (min, max int, rest string, ok bool) { - if s == "" || s[0] != '{' { - return - } - s = s[1:] - var ok1 bool - if min, s, ok1 = p.parseInt(s); !ok1 { - return - } - if s == "" { - return - } - if s[0] != ',' { - max = min - } else { - s = s[1:] - if s == "" { - return - } - if s[0] == '}' { - max = -1 - } else if max, s, ok1 = p.parseInt(s); !ok1 { - return - } else if max < 0 { - // parseInt found too big a number - min = -1 - } - } - if s == "" || s[0] != '}' { - return - } - rest = s[1:] - ok = true - return -} - -// parsePerlFlags parses a Perl flag setting or non-capturing group or both, -// like (?i) or (?: or (?i:. It removes the prefix from s and updates the parse state. -// The caller must have ensured that s begins with "(?". -func (p *parser) parsePerlFlags(s string) (rest string, err error) { - t := s - - // Check for named captures, first introduced in Python's regexp library. - // As usual, there are three slightly different syntaxes: - // - // (?Pexpr) the original, introduced by Python - // (?expr) the .NET alteration, adopted by Perl 5.10 - // (?'name'expr) another .NET alteration, adopted by Perl 5.10 - // - // Perl 5.10 gave in and implemented the Python version too, - // but they claim that the last two are the preferred forms. - // PCRE and languages based on it (specifically, PHP and Ruby) - // support all three as well. EcmaScript 4 uses only the Python form. - // - // In both the open source world (via Code Search) and the - // Google source tree, (?Pname) and (?name) are the - // dominant forms of named captures and both are supported. - startsWithP := len(t) > 4 && t[2] == 'P' && t[3] == '<' - startsWithName := len(t) > 3 && t[2] == '<' - - if startsWithP || startsWithName { - // position of expr start - exprStartPos := 4 - if startsWithName { - exprStartPos = 3 - } - - // Pull out name. - end := strings.IndexRune(t, '>') - if end < 0 { - if err = checkUTF8(t); err != nil { - return "", err - } - return "", &Error{ErrInvalidNamedCapture, s} - } - - capture := t[:end+1] // "(?P" or "(?" - name := t[exprStartPos:end] // "name" - if err = checkUTF8(name); err != nil { - return "", err - } - if !isValidCaptureName(name) { - return "", &Error{ErrInvalidNamedCapture, capture} - } - - // Like ordinary capture, but named. - p.numCap++ - re := p.op(opLeftParen) - re.Cap = p.numCap - re.Name = name - return t[end+1:], nil - } - - // Non-capturing group. Might also twiddle Perl flags. - var c rune - t = t[2:] // skip (? - flags := p.flags - sign := +1 - sawFlag := false -Loop: - for t != "" { - if c, t, err = nextRune(t); err != nil { - return "", err - } - switch c { - default: - break Loop - - // Flags. - case 'i': - flags |= FoldCase - sawFlag = true - case 'm': - flags &^= OneLine - sawFlag = true - case 's': - flags |= DotNL - sawFlag = true - case 'U': - flags |= NonGreedy - sawFlag = true - - // Switch to negation. 
- case '-': - if sign < 0 { - break Loop - } - sign = -1 - // Invert flags so that | above turn into &^ and vice versa. - // We'll invert flags again before using it below. - flags = ^flags - sawFlag = false - - // End of flags, starting group or not. - case ':', ')': - if sign < 0 { - if !sawFlag { - break Loop - } - flags = ^flags - } - if c == ':' { - // Open new group - p.op(opLeftParen) - } - p.flags = flags - return t, nil - } - } - - return "", &Error{ErrInvalidPerlOp, s[:len(s)-len(t)]} -} - -// isValidCaptureName reports whether name -// is a valid capture name: [A-Za-z0-9_]+. -// PCRE limits names to 32 bytes. -// Python rejects names starting with digits. -// We don't enforce either of those. -func isValidCaptureName(name string) bool { - if name == "" { - return false - } - for _, c := range name { - if c != '_' && !isalnum(c) { - return false - } - } - return true -} - -// parseInt parses a decimal integer. -func (p *parser) parseInt(s string) (n int, rest string, ok bool) { - if s == "" || s[0] < '0' || '9' < s[0] { - return - } - // Disallow leading zeros. - if len(s) >= 2 && s[0] == '0' && '0' <= s[1] && s[1] <= '9' { - return - } - t := s - for s != "" && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - rest = s - ok = true - // Have digits, compute value. - t = t[:len(t)-len(s)] - for i := 0; i < len(t); i++ { - // Avoid overflow. - if n >= 1e8 { - n = -1 - break - } - n = n*10 + int(t[i]) - '0' - } - return -} - -// can this be represented as a character class? -// single-rune literal string, char class, ., and .|\n. -func isCharClass(re *Regexp) bool { - return re.Op == OpLiteral && len(re.Rune) == 1 || - re.Op == OpCharClass || - re.Op == OpAnyCharNotNL || - re.Op == OpAnyChar -} - -// does re match r? -func matchRune(re *Regexp, r rune) bool { - switch re.Op { - case OpLiteral: - return len(re.Rune) == 1 && re.Rune[0] == r - case OpCharClass: - for i := 0; i < len(re.Rune); i += 2 { - if re.Rune[i] <= r && r <= re.Rune[i+1] { - return true - } - } - return false - case OpAnyCharNotNL: - return r != '\n' - case OpAnyChar: - return true - } - return false -} - -// parseVerticalBar handles a | in the input. -func (p *parser) parseVerticalBar() error { - p.concat() - - // The concatenation we just parsed is on top of the stack. - // If it sits above an opVerticalBar, swap it below - // (things below an opVerticalBar become an alternation). - // Otherwise, push a new vertical bar. - if !p.swapVerticalBar() { - p.op(opVerticalBar) - } - - return nil -} - -// mergeCharClass makes dst = dst|src. -// The caller must ensure that dst.Op >= src.Op, -// to reduce the amount of copying. -func mergeCharClass(dst, src *Regexp) { - switch dst.Op { - case OpAnyChar: - // src doesn't add anything. - case OpAnyCharNotNL: - // src might add \n - if matchRune(src, '\n') { - dst.Op = OpAnyChar - } - case OpCharClass: - // src is simpler, so either literal or char class - if src.Op == OpLiteral { - dst.Rune = appendLiteral(dst.Rune, src.Rune[0], src.Flags) - } else { - dst.Rune = appendClass(dst.Rune, src.Rune) - } - case OpLiteral: - // both literal - if src.Rune[0] == dst.Rune[0] && src.Flags == dst.Flags { - break - } - dst.Op = OpCharClass - dst.Rune = appendLiteral(dst.Rune[:0], dst.Rune[0], dst.Flags) - dst.Rune = appendLiteral(dst.Rune, src.Rune[0], src.Flags) - } -} - -// If the top of the stack is an element followed by an opVerticalBar -// swapVerticalBar swaps the two and returns true. -// Otherwise it returns false. 
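The vertical-bar handling above (swapVerticalBar together with mergeCharClass) folds single-character alternatives into one character class while parsing, which the rendered tree makes visible. A minimal sketch with the standard library regexp/syntax package (mirrored by this fork):

package main

import (
	"fmt"
	"regexp/syntax"
)

func main() {
	re, _ := syntax.Parse(`a|b|c`, syntax.Perl)
	fmt.Println(re.Op == syntax.OpCharClass, re) // true [a-c]
}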
-func (p *parser) swapVerticalBar() bool { - // If above and below vertical bar are literal or char class, - // can merge into a single char class. - n := len(p.stack) - if n >= 3 && p.stack[n-2].Op == opVerticalBar && isCharClass(p.stack[n-1]) && isCharClass(p.stack[n-3]) { - re1 := p.stack[n-1] - re3 := p.stack[n-3] - // Make re3 the more complex of the two. - if re1.Op > re3.Op { - re1, re3 = re3, re1 - p.stack[n-3] = re3 - } - mergeCharClass(re3, re1) - p.reuse(re1) - p.stack = p.stack[:n-1] - return true - } - - if n >= 2 { - re1 := p.stack[n-1] - re2 := p.stack[n-2] - if re2.Op == opVerticalBar { - if n >= 3 { - // Now out of reach. - // Clean opportunistically. - cleanAlt(p.stack[n-3]) - } - p.stack[n-2] = re1 - p.stack[n-1] = re2 - return true - } - } - return false -} - -// parseRightParen handles a ) in the input. -func (p *parser) parseRightParen() error { - p.concat() - if p.swapVerticalBar() { - // pop vertical bar - p.stack = p.stack[:len(p.stack)-1] - } - p.alternate() - - n := len(p.stack) - if n < 2 { - return &Error{ErrUnexpectedParen, p.wholeRegexp} - } - re1 := p.stack[n-1] - re2 := p.stack[n-2] - p.stack = p.stack[:n-2] - if re2.Op != opLeftParen { - return &Error{ErrUnexpectedParen, p.wholeRegexp} - } - // Restore flags at time of paren. - p.flags = re2.Flags - if re2.Cap == 0 { - // Just for grouping. - p.push(re1) - } else { - re2.Op = OpCapture - re2.Sub = re2.Sub0[:1] - re2.Sub[0] = re1 - p.push(re2) - } - return nil -} - -// parseEscape parses an escape sequence at the beginning of s -// and returns the rune. -func (p *parser) parseEscape(s string) (r rune, rest string, err error) { - t := s[1:] - if t == "" { - return 0, "", &Error{ErrTrailingBackslash, ""} - } - c, t, err := nextRune(t) - if err != nil { - return 0, "", err - } - -Switch: - switch c { - default: - if c < utf8.RuneSelf && !isalnum(c) { - // Escaped non-word characters are always themselves. - // PCRE is not quite so rigorous: it accepts things like - // \q, but we don't. We once rejected \_, but too many - // programs and people insist on using it, so allow \_. - return c, t, nil - } - - // Octal escapes. - case '1', '2', '3', '4', '5', '6', '7': - // Single non-zero digit is a backreference; not supported - if t == "" || t[0] < '0' || t[0] > '7' { - break - } - fallthrough - case '0': - // Consume up to three octal digits; already have one. - r = c - '0' - for i := 1; i < 3; i++ { - if t == "" || t[0] < '0' || t[0] > '7' { - break - } - r = r*8 + rune(t[0]) - '0' - t = t[1:] - } - return r, t, nil - - // Hexadecimal escapes. - case 'x': - if t == "" { - break - } - if c, t, err = nextRune(t); err != nil { - return 0, "", err - } - if c == '{' { - // Any number of digits in braces. - // Perl accepts any text at all; it ignores all text - // after the first non-hex digit. We require only hex digits, - // and at least one. - nhex := 0 - r = 0 - for { - if t == "" { - break Switch - } - if c, t, err = nextRune(t); err != nil { - return 0, "", err - } - if c == '}' { - break - } - v := unhex(c) - if v < 0 { - break Switch - } - r = r*16 + v - if r > unicode.MaxRune { - break Switch - } - nhex++ - } - if nhex == 0 { - break Switch - } - return r, t, nil - } - - // Easy case: two hex digits. - x := unhex(c) - if c, t, err = nextRune(t); err != nil { - return 0, "", err - } - y := unhex(c) - if x < 0 || y < 0 { - break - } - return x*16 + y, t, nil - - // C escapes. There is no case 'b', to avoid misparsing - // the Perl word-boundary \b as the C backspace \b - // when in POSIX mode. 
In Perl, /\b/ means word-boundary - // but /[\b]/ means backspace. We don't support that. - // If you want a backspace, embed a literal backspace - // character or use \x08. - case 'a': - return '\a', t, err - case 'f': - return '\f', t, err - case 'n': - return '\n', t, err - case 'r': - return '\r', t, err - case 't': - return '\t', t, err - case 'v': - return '\v', t, err - } - return 0, "", &Error{ErrInvalidEscape, s[:len(s)-len(t)]} -} - -// parseClassChar parses a character class character at the beginning of s -// and returns it. -func (p *parser) parseClassChar(s, wholeClass string) (r rune, rest string, err error) { - if s == "" { - return 0, "", &Error{Code: ErrMissingBracket, Expr: wholeClass} - } - - // Allow regular escape sequences even though - // many need not be escaped in this context. - if s[0] == '\\' { - return p.parseEscape(s) - } - - return nextRune(s) -} - -type charGroup struct { - sign int - class []rune -} - -// parsePerlClassEscape parses a leading Perl character class escape like \d -// from the beginning of s. If one is present, it appends the characters to r -// and returns the new slice r and the remainder of the string. -func (p *parser) parsePerlClassEscape(s string, r []rune) (out []rune, rest string) { - if p.flags&PerlX == 0 || len(s) < 2 || s[0] != '\\' { - return - } - g := perlGroup[s[0:2]] - if g.sign == 0 { - return - } - return p.appendGroup(r, g), s[2:] -} - -// parseNamedClass parses a leading POSIX named character class like [:alnum:] -// from the beginning of s. If one is present, it appends the characters to r -// and returns the new slice r and the remainder of the string. -func (p *parser) parseNamedClass(s string, r []rune) (out []rune, rest string, err error) { - if len(s) < 2 || s[0] != '[' || s[1] != ':' { - return - } - - i := strings.Index(s[2:], ":]") - if i < 0 { - return - } - i += 2 - name, s := s[0:i+2], s[i+2:] - g := posixGroup[name] - if g.sign == 0 { - return nil, "", &Error{ErrInvalidCharRange, name} - } - return p.appendGroup(r, g), s, nil -} - -func (p *parser) appendGroup(r []rune, g charGroup) []rune { - if p.flags&FoldCase == 0 { - if g.sign < 0 { - r = appendNegatedClass(r, g.class) - } else { - r = appendClass(r, g.class) - } - } else { - tmp := p.tmpClass[:0] - tmp = appendFoldedClass(tmp, g.class) - p.tmpClass = tmp - tmp = cleanClass(&p.tmpClass) - if g.sign < 0 { - r = appendNegatedClass(r, tmp) - } else { - r = appendClass(r, tmp) - } - } - return r -} - -var anyTable = &unicode.RangeTable{ - R16: []unicode.Range16{{Lo: 0, Hi: 1<<16 - 1, Stride: 1}}, - R32: []unicode.Range32{{Lo: 1 << 16, Hi: unicode.MaxRune, Stride: 1}}, -} - -// unicodeTable returns the unicode.RangeTable identified by name -// and the table of additional fold-equivalent code points. -func unicodeTable(name string) (*unicode.RangeTable, *unicode.RangeTable) { - // Special case: "Any" means any. - if name == "Any" { - return anyTable, anyTable - } - if t := unicode.Categories[name]; t != nil { - return t, unicode.FoldCategory[name] - } - if t := unicode.Scripts[name]; t != nil { - return t, unicode.FoldScript[name] - } - return nil, nil -} - -// parseUnicodeClass parses a leading Unicode character class like \p{Han} -// from the beginning of s. If one is present, it appends the characters to r -// and returns the new slice r and the remainder of the string. 
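// Examples, not part of the vendored source:
//   \pL        single-letter name, same as \p{L} (all Unicode letters)
//   \p{Greek}  adds the ranges of the Greek script to the class
//   \P{Greek}  and \p{^Greek} both add the negation of those ranges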
-func (p *parser) parseUnicodeClass(s string, r []rune) (out []rune, rest string, err error) { - if p.flags&UnicodeGroups == 0 || len(s) < 2 || s[0] != '\\' || s[1] != 'p' && s[1] != 'P' { - return - } - - // Committed to parse or return error. - sign := +1 - if s[1] == 'P' { - sign = -1 - } - t := s[2:] - c, t, err := nextRune(t) - if err != nil { - return - } - var seq, name string - if c != '{' { - // Single-letter name. - seq = s[:len(s)-len(t)] - name = seq[2:] - } else { - // Name is in braces. - end := strings.IndexRune(s, '}') - if end < 0 { - if err = checkUTF8(s); err != nil { - return - } - return nil, "", &Error{ErrInvalidCharRange, s} - } - seq, t = s[:end+1], s[end+1:] - name = s[3:end] - if err = checkUTF8(name); err != nil { - return - } - } - - // Group can have leading negation too. \p{^Han} == \P{Han}, \P{^Han} == \p{Han}. - if name != "" && name[0] == '^' { - sign = -sign - name = name[1:] - } - - tab, fold := unicodeTable(name) - if tab == nil { - return nil, "", &Error{ErrInvalidCharRange, seq} - } - - if p.flags&FoldCase == 0 || fold == nil { - if sign > 0 { - r = appendTable(r, tab) - } else { - r = appendNegatedTable(r, tab) - } - } else { - // Merge and clean tab and fold in a temporary buffer. - // This is necessary for the negative case and just tidy - // for the positive case. - tmp := p.tmpClass[:0] - tmp = appendTable(tmp, tab) - tmp = appendTable(tmp, fold) - p.tmpClass = tmp - tmp = cleanClass(&p.tmpClass) - if sign > 0 { - r = appendClass(r, tmp) - } else { - r = appendNegatedClass(r, tmp) - } - } - return r, t, nil -} - -// parseClass parses a character class at the beginning of s -// and pushes it onto the parse stack. -func (p *parser) parseClass(s string) (rest string, err error) { - t := s[1:] // chop [ - re := p.newRegexp(OpCharClass) - re.Flags = p.flags - re.Rune = re.Rune0[:0] - - sign := +1 - if t != "" && t[0] == '^' { - sign = -1 - t = t[1:] - - // If character class does not match \n, add it here, - // so that negation later will do the right thing. - if p.flags&ClassNL == 0 { - re.Rune = append(re.Rune, '\n', '\n') - } - } - - class := re.Rune - first := true // ] and - are okay as first char in class - for t == "" || t[0] != ']' || first { - // POSIX: - is only okay unescaped as first or last in class. - // Perl: - is okay anywhere. - if t != "" && t[0] == '-' && p.flags&PerlX == 0 && !first && (len(t) == 1 || t[1] != ']') { - _, size := utf8.DecodeRuneInString(t[1:]) - return "", &Error{Code: ErrInvalidCharRange, Expr: t[:1+size]} - } - first = false - - // Look for POSIX [:alnum:] etc. - if len(t) > 2 && t[0] == '[' && t[1] == ':' { - nclass, nt, err := p.parseNamedClass(t, class) - if err != nil { - return "", err - } - if nclass != nil { - class, t = nclass, nt - continue - } - } - - // Look for Unicode character group like \p{Han}. - nclass, nt, err := p.parseUnicodeClass(t, class) - if err != nil { - return "", err - } - if nclass != nil { - class, t = nclass, nt - continue - } - - // Look for Perl character class symbols (extension). - if nclass, nt := p.parsePerlClassEscape(t, class); nclass != nil { - class, t = nclass, nt - continue - } - - // Single character or simple range. - rng := t - var lo, hi rune - if lo, t, err = p.parseClassChar(t, s); err != nil { - return "", err - } - hi = lo - // [a-] means (a|-) so check for final ]. 
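// Illustration, not part of the vendored source: in "[a-c]" the branch below
// consumes the '-' and parses 'c' as hi, producing the single range a..c; in
// "[a-]" the character after '-' is ']', so the branch is skipped and 'a' and
// '-' are added as two one-character ranges instead.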
- if len(t) >= 2 && t[0] == '-' && t[1] != ']' { - t = t[1:] - if hi, t, err = p.parseClassChar(t, s); err != nil { - return "", err - } - if hi < lo { - rng = rng[:len(rng)-len(t)] - return "", &Error{Code: ErrInvalidCharRange, Expr: rng} - } - } - if p.flags&FoldCase == 0 { - class = appendRange(class, lo, hi) - } else { - class = appendFoldedRange(class, lo, hi) - } - } - t = t[1:] // chop ] - - // Use &re.Rune instead of &class to avoid allocation. - re.Rune = class - class = cleanClass(&re.Rune) - if sign < 0 { - class = negateClass(class) - } - re.Rune = class - p.push(re) - return t, nil -} - -// cleanClass sorts the ranges (pairs of elements of r), -// merges them, and eliminates duplicates. -func cleanClass(rp *[]rune) []rune { - - // Sort by lo increasing, hi decreasing to break ties. - sort.Sort(ranges{rp}) - - r := *rp - if len(r) < 2 { - return r - } - - // Merge abutting, overlapping. - w := 2 // write index - for i := 2; i < len(r); i += 2 { - lo, hi := r[i], r[i+1] - if lo <= r[w-1]+1 { - // merge with previous range - if hi > r[w-1] { - r[w-1] = hi - } - continue - } - // new disjoint range - r[w] = lo - r[w+1] = hi - w += 2 - } - - return r[:w] -} - -// inCharClass reports whether r is in the class. -// It assumes the class has been cleaned by cleanClass. -func inCharClass(r rune, class []rune) bool { - _, ok := sort.Find(len(class)/2, func(i int) int { - lo, hi := class[2*i], class[2*i+1] - if r > hi { - return +1 - } - if r < lo { - return -1 - } - return 0 - }) - return ok -} - -// appendLiteral returns the result of appending the literal x to the class r. -func appendLiteral(r []rune, x rune, flags Flags) []rune { - if flags&FoldCase != 0 { - return appendFoldedRange(r, x, x) - } - return appendRange(r, x, x) -} - -// appendRange returns the result of appending the range lo-hi to the class r. -func appendRange(r []rune, lo, hi rune) []rune { - // Expand last range or next to last range if it overlaps or abuts. - // Checking two ranges helps when appending case-folded - // alphabets, so that one range can be expanding A-Z and the - // other expanding a-z. - n := len(r) - for i := 2; i <= 4; i += 2 { // twice, using i=2, i=4 - if n >= i { - rlo, rhi := r[n-i], r[n-i+1] - if lo <= rhi+1 && rlo <= hi+1 { - if lo < rlo { - r[n-i] = lo - } - if hi > rhi { - r[n-i+1] = hi - } - return r - } - } - } - - return append(r, lo, hi) -} - -const ( - // minimum and maximum runes involved in folding. - // checked during test. - minFold = 0x0041 - maxFold = 0x1e943 -) - -// appendFoldedRange returns the result of appending the range lo-hi -// and its case folding-equivalent runes to the class r. -func appendFoldedRange(r []rune, lo, hi rune) []rune { - // Optimizations. - if lo <= minFold && hi >= maxFold { - // Range is full: folding can't add more. - return appendRange(r, lo, hi) - } - if hi < minFold || lo > maxFold { - // Range is outside folding possibilities. - return appendRange(r, lo, hi) - } - if lo < minFold { - // [lo, minFold-1] needs no folding. - r = appendRange(r, lo, minFold-1) - lo = minFold - } - if hi > maxFold { - // [maxFold+1, hi] needs no folding. - r = appendRange(r, maxFold+1, hi) - hi = maxFold - } - - // Brute force. Depend on appendRange to coalesce ranges on the fly. - for c := lo; c <= hi; c++ { - r = appendRange(r, c, c) - f := unicode.SimpleFold(c) - for f != c { - r = appendRange(r, f, f) - f = unicode.SimpleFold(f) - } - } - return r -} - -// appendClass returns the result of appending the class x to the class r. -// It assume x is clean. 
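// Representation note, not part of the vendored source: a class is a []rune of
// inclusive lo,hi pairs, so []rune{'0', '9', 'a', 'f'} is [0-9a-f]. Appending
// the pair 'A', 'F' via appendRange and then running cleanClass sorts and
// merges the pairs, giving the class [0-9A-Fa-f].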
-func appendClass(r []rune, x []rune) []rune { - for i := 0; i < len(x); i += 2 { - r = appendRange(r, x[i], x[i+1]) - } - return r -} - -// appendFoldedClass returns the result of appending the case folding of the class x to the class r. -func appendFoldedClass(r []rune, x []rune) []rune { - for i := 0; i < len(x); i += 2 { - r = appendFoldedRange(r, x[i], x[i+1]) - } - return r -} - -// appendNegatedClass returns the result of appending the negation of the class x to the class r. -// It assumes x is clean. -func appendNegatedClass(r []rune, x []rune) []rune { - nextLo := '\u0000' - for i := 0; i < len(x); i += 2 { - lo, hi := x[i], x[i+1] - if nextLo <= lo-1 { - r = appendRange(r, nextLo, lo-1) - } - nextLo = hi + 1 - } - if nextLo <= unicode.MaxRune { - r = appendRange(r, nextLo, unicode.MaxRune) - } - return r -} - -// appendTable returns the result of appending x to the class r. -func appendTable(r []rune, x *unicode.RangeTable) []rune { - for _, xr := range x.R16 { - lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride) - if stride == 1 { - r = appendRange(r, lo, hi) - continue - } - for c := lo; c <= hi; c += stride { - r = appendRange(r, c, c) - } - } - for _, xr := range x.R32 { - lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride) - if stride == 1 { - r = appendRange(r, lo, hi) - continue - } - for c := lo; c <= hi; c += stride { - r = appendRange(r, c, c) - } - } - return r -} - -// appendNegatedTable returns the result of appending the negation of x to the class r. -func appendNegatedTable(r []rune, x *unicode.RangeTable) []rune { - nextLo := '\u0000' // lo end of next class to add - for _, xr := range x.R16 { - lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride) - if stride == 1 { - if nextLo <= lo-1 { - r = appendRange(r, nextLo, lo-1) - } - nextLo = hi + 1 - continue - } - for c := lo; c <= hi; c += stride { - if nextLo <= c-1 { - r = appendRange(r, nextLo, c-1) - } - nextLo = c + 1 - } - } - for _, xr := range x.R32 { - lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride) - if stride == 1 { - if nextLo <= lo-1 { - r = appendRange(r, nextLo, lo-1) - } - nextLo = hi + 1 - continue - } - for c := lo; c <= hi; c += stride { - if nextLo <= c-1 { - r = appendRange(r, nextLo, c-1) - } - nextLo = c + 1 - } - } - if nextLo <= unicode.MaxRune { - r = appendRange(r, nextLo, unicode.MaxRune) - } - return r -} - -// negateClass overwrites r and returns r's negation. -// It assumes the class r is already clean. -func negateClass(r []rune) []rune { - nextLo := '\u0000' // lo end of next class to add - w := 0 // write index - for i := 0; i < len(r); i += 2 { - lo, hi := r[i], r[i+1] - if nextLo <= lo-1 { - r[w] = nextLo - r[w+1] = lo - 1 - w += 2 - } - nextLo = hi + 1 - } - r = r[:w] - if nextLo <= unicode.MaxRune { - // It's possible for the negation to have one more - // range - this one - than the original class, so use append. - r = append(r, nextLo, unicode.MaxRune) - } - return r -} - -// ranges implements sort.Interface on a []rune. -// The choice of receiver type definition is strange -// but avoids an allocation since we already have -// a *[]rune. 
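// Worked example for negateClass above, not part of the vendored source:
// negating the clean class []rune{'a', 'c'} rewrites it in place to
// []rune{0x0, 'a' - 1, 'c' + 1, unicode.MaxRune}, i.e. every rune except a-c.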
-type ranges struct { - p *[]rune -} - -func (ra ranges) Less(i, j int) bool { - p := *ra.p - i *= 2 - j *= 2 - return p[i] < p[j] || p[i] == p[j] && p[i+1] > p[j+1] -} - -func (ra ranges) Len() int { - return len(*ra.p) / 2 -} - -func (ra ranges) Swap(i, j int) { - p := *ra.p - i *= 2 - j *= 2 - p[i], p[i+1], p[j], p[j+1] = p[j], p[j+1], p[i], p[i+1] -} - -func checkUTF8(s string) error { - for s != "" { - rune, size := utf8.DecodeRuneInString(s) - if rune == utf8.RuneError && size == 1 { - return &Error{Code: ErrInvalidUTF8, Expr: s} - } - s = s[size:] - } - return nil -} - -func nextRune(s string) (c rune, t string, err error) { - c, size := utf8.DecodeRuneInString(s) - if c == utf8.RuneError && size == 1 { - return 0, "", &Error{Code: ErrInvalidUTF8, Expr: s} - } - return c, s[size:], nil -} - -func isalnum(c rune) bool { - return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' -} - -func unhex(c rune) rune { - if '0' <= c && c <= '9' { - return c - '0' - } - if 'a' <= c && c <= 'f' { - return c - 'a' + 10 - } - if 'A' <= c && c <= 'F' { - return c - 'A' + 10 - } - return -1 -} diff --git a/vendor/github.com/grafana/regexp/syntax/perl_groups.go b/vendor/github.com/grafana/regexp/syntax/perl_groups.go deleted file mode 100644 index effe4e68..00000000 --- a/vendor/github.com/grafana/regexp/syntax/perl_groups.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// GENERATED BY make_perl_groups.pl; DO NOT EDIT. -// make_perl_groups.pl >perl_groups.go - -package syntax - -var code1 = []rune{ /* \d */ - 0x30, 0x39, -} - -var code2 = []rune{ /* \s */ - 0x9, 0xa, - 0xc, 0xd, - 0x20, 0x20, -} - -var code3 = []rune{ /* \w */ - 0x30, 0x39, - 0x41, 0x5a, - 0x5f, 0x5f, - 0x61, 0x7a, -} - -var perlGroup = map[string]charGroup{ - `\d`: {+1, code1}, - `\D`: {-1, code1}, - `\s`: {+1, code2}, - `\S`: {-1, code2}, - `\w`: {+1, code3}, - `\W`: {-1, code3}, -} -var code4 = []rune{ /* [:alnum:] */ - 0x30, 0x39, - 0x41, 0x5a, - 0x61, 0x7a, -} - -var code5 = []rune{ /* [:alpha:] */ - 0x41, 0x5a, - 0x61, 0x7a, -} - -var code6 = []rune{ /* [:ascii:] */ - 0x0, 0x7f, -} - -var code7 = []rune{ /* [:blank:] */ - 0x9, 0x9, - 0x20, 0x20, -} - -var code8 = []rune{ /* [:cntrl:] */ - 0x0, 0x1f, - 0x7f, 0x7f, -} - -var code9 = []rune{ /* [:digit:] */ - 0x30, 0x39, -} - -var code10 = []rune{ /* [:graph:] */ - 0x21, 0x7e, -} - -var code11 = []rune{ /* [:lower:] */ - 0x61, 0x7a, -} - -var code12 = []rune{ /* [:print:] */ - 0x20, 0x7e, -} - -var code13 = []rune{ /* [:punct:] */ - 0x21, 0x2f, - 0x3a, 0x40, - 0x5b, 0x60, - 0x7b, 0x7e, -} - -var code14 = []rune{ /* [:space:] */ - 0x9, 0xd, - 0x20, 0x20, -} - -var code15 = []rune{ /* [:upper:] */ - 0x41, 0x5a, -} - -var code16 = []rune{ /* [:word:] */ - 0x30, 0x39, - 0x41, 0x5a, - 0x5f, 0x5f, - 0x61, 0x7a, -} - -var code17 = []rune{ /* [:xdigit:] */ - 0x30, 0x39, - 0x41, 0x46, - 0x61, 0x66, -} - -var posixGroup = map[string]charGroup{ - `[:alnum:]`: {+1, code4}, - `[:^alnum:]`: {-1, code4}, - `[:alpha:]`: {+1, code5}, - `[:^alpha:]`: {-1, code5}, - `[:ascii:]`: {+1, code6}, - `[:^ascii:]`: {-1, code6}, - `[:blank:]`: {+1, code7}, - `[:^blank:]`: {-1, code7}, - `[:cntrl:]`: {+1, code8}, - `[:^cntrl:]`: {-1, code8}, - `[:digit:]`: {+1, code9}, - `[:^digit:]`: {-1, code9}, - `[:graph:]`: {+1, code10}, - `[:^graph:]`: {-1, code10}, - `[:lower:]`: {+1, code11}, - `[:^lower:]`: {-1, code11}, - `[:print:]`: {+1, code12}, 
- `[:^print:]`: {-1, code12}, - `[:punct:]`: {+1, code13}, - `[:^punct:]`: {-1, code13}, - `[:space:]`: {+1, code14}, - `[:^space:]`: {-1, code14}, - `[:upper:]`: {+1, code15}, - `[:^upper:]`: {-1, code15}, - `[:word:]`: {+1, code16}, - `[:^word:]`: {-1, code16}, - `[:xdigit:]`: {+1, code17}, - `[:^xdigit:]`: {-1, code17}, -} diff --git a/vendor/github.com/grafana/regexp/syntax/prog.go b/vendor/github.com/grafana/regexp/syntax/prog.go deleted file mode 100644 index 6a3705ec..00000000 --- a/vendor/github.com/grafana/regexp/syntax/prog.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -import ( - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// Compiled program. -// May not belong in this package, but convenient for now. - -// A Prog is a compiled regular expression program. -type Prog struct { - Inst []Inst - Start int // index of start instruction - NumCap int // number of InstCapture insts in re -} - -// An InstOp is an instruction opcode. -type InstOp uint8 - -const ( - InstAlt InstOp = iota - InstAltMatch - InstCapture - InstEmptyWidth - InstMatch - InstFail - InstNop - InstRune - InstRune1 - InstRuneAny - InstRuneAnyNotNL -) - -var instOpNames = []string{ - "InstAlt", - "InstAltMatch", - "InstCapture", - "InstEmptyWidth", - "InstMatch", - "InstFail", - "InstNop", - "InstRune", - "InstRune1", - "InstRuneAny", - "InstRuneAnyNotNL", -} - -func (i InstOp) String() string { - if uint(i) >= uint(len(instOpNames)) { - return "" - } - return instOpNames[i] -} - -// An EmptyOp specifies a kind or mixture of zero-width assertions. -type EmptyOp uint8 - -const ( - EmptyBeginLine EmptyOp = 1 << iota - EmptyEndLine - EmptyBeginText - EmptyEndText - EmptyWordBoundary - EmptyNoWordBoundary -) - -// EmptyOpContext returns the zero-width assertions -// satisfied at the position between the runes r1 and r2. -// Passing r1 == -1 indicates that the position is -// at the beginning of the text. -// Passing r2 == -1 indicates that the position is -// at the end of the text. -func EmptyOpContext(r1, r2 rune) EmptyOp { - var op EmptyOp = EmptyNoWordBoundary - var boundary byte - switch { - case IsWordChar(r1): - boundary = 1 - case r1 == '\n': - op |= EmptyBeginLine - case r1 < 0: - op |= EmptyBeginText | EmptyBeginLine - } - switch { - case IsWordChar(r2): - boundary ^= 1 - case r2 == '\n': - op |= EmptyEndLine - case r2 < 0: - op |= EmptyEndText | EmptyEndLine - } - if boundary != 0 { // IsWordChar(r1) != IsWordChar(r2) - op ^= (EmptyWordBoundary | EmptyNoWordBoundary) - } - return op -} - -// IsWordChar reports whether r is considered a “word character” -// during the evaluation of the \b and \B zero-width assertions. -// These assertions are ASCII-only: the word characters are [A-Za-z0-9_]. -func IsWordChar(r rune) bool { - // Test for lowercase letters first, as these occur more - // frequently than uppercase letters in common cases. - return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || r == '_' -} - -// An Inst is a single instruction in a regular expression program. -type Inst struct { - Op InstOp - Out uint32 // all but InstMatch, InstFail - Arg uint32 // InstAlt, InstAltMatch, InstCapture, InstEmptyWidth - Rune []rune -} - -func (p *Prog) String() string { - var b strings.Builder - dumpProg(&b, p) - return b.String() -} - -// skipNop follows any no-op or capturing instructions. 
-func (p *Prog) skipNop(pc uint32) *Inst { - i := &p.Inst[pc] - for i.Op == InstNop || i.Op == InstCapture { - i = &p.Inst[i.Out] - } - return i -} - -// op returns i.Op but merges all the Rune special cases into InstRune -func (i *Inst) op() InstOp { - op := i.Op - switch op { - case InstRune1, InstRuneAny, InstRuneAnyNotNL: - op = InstRune - } - return op -} - -// Prefix returns a literal string that all matches for the -// regexp must start with. Complete is true if the prefix -// is the entire match. -func (p *Prog) Prefix() (prefix string, complete bool) { - i := p.skipNop(uint32(p.Start)) - - // Avoid allocation of buffer if prefix is empty. - if i.op() != InstRune || len(i.Rune) != 1 { - return "", i.Op == InstMatch - } - - // Have prefix; gather characters. - var buf strings.Builder - for i.op() == InstRune && len(i.Rune) == 1 && Flags(i.Arg)&FoldCase == 0 && i.Rune[0] != utf8.RuneError { - buf.WriteRune(i.Rune[0]) - i = p.skipNop(i.Out) - } - return buf.String(), i.Op == InstMatch -} - -// StartCond returns the leading empty-width conditions that must -// be true in any match. It returns ^EmptyOp(0) if no matches are possible. -func (p *Prog) StartCond() EmptyOp { - var flag EmptyOp - pc := uint32(p.Start) - i := &p.Inst[pc] -Loop: - for { - switch i.Op { - case InstEmptyWidth: - flag |= EmptyOp(i.Arg) - case InstFail: - return ^EmptyOp(0) - case InstCapture, InstNop: - // skip - default: - break Loop - } - pc = i.Out - i = &p.Inst[pc] - } - return flag -} - -const noMatch = -1 - -// MatchRune reports whether the instruction matches (and consumes) r. -// It should only be called when i.Op == [InstRune]. -func (i *Inst) MatchRune(r rune) bool { - return i.MatchRunePos(r) != noMatch -} - -// MatchRunePos checks whether the instruction matches (and consumes) r. -// If so, MatchRunePos returns the index of the matching rune pair -// (or, when len(i.Rune) == 1, rune singleton). -// If not, MatchRunePos returns -1. -// MatchRunePos should only be called when i.Op == [InstRune]. -func (i *Inst) MatchRunePos(r rune) int { - rune := i.Rune - - switch len(rune) { - case 0: - return noMatch - - case 1: - // Special case: single-rune slice is from literal string, not char class. - r0 := rune[0] - if r == r0 { - return 0 - } - if Flags(i.Arg)&FoldCase != 0 { - for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { - if r == r1 { - return 0 - } - } - } - return noMatch - - case 2: - if r >= rune[0] && r <= rune[1] { - return 0 - } - return noMatch - - case 4, 6, 8: - // Linear search for a few pairs. - // Should handle ASCII well. - for j := 0; j < len(rune); j += 2 { - if r < rune[j] { - return noMatch - } - if r <= rune[j+1] { - return j / 2 - } - } - return noMatch - } - - // Otherwise binary search. - lo := 0 - hi := len(rune) / 2 - for lo < hi { - m := int(uint(lo+hi) >> 1) - if c := rune[2*m]; c <= r { - if r <= rune[2*m+1] { - return m - } - lo = m + 1 - } else { - hi = m - } - } - return noMatch -} - -// MatchEmptyWidth reports whether the instruction matches -// an empty string between the runes before and after. -// It should only be called when i.Op == [InstEmptyWidth]. 
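// Examples, not part of the vendored source (with -1 standing for the start
// or end of the text):
//   MatchEmptyWidth('a', ' ') is true for EmptyWordBoundary, since 'a' is a
//   word character and ' ' is not, and false for EmptyNoWordBoundary.
//   MatchEmptyWidth(-1, 'x') is true for EmptyBeginText and EmptyBeginLine.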
-func (i *Inst) MatchEmptyWidth(before rune, after rune) bool { - switch EmptyOp(i.Arg) { - case EmptyBeginLine: - return before == '\n' || before == -1 - case EmptyEndLine: - return after == '\n' || after == -1 - case EmptyBeginText: - return before == -1 - case EmptyEndText: - return after == -1 - case EmptyWordBoundary: - return IsWordChar(before) != IsWordChar(after) - case EmptyNoWordBoundary: - return IsWordChar(before) == IsWordChar(after) - } - panic("unknown empty width arg") -} - -func (i *Inst) String() string { - var b strings.Builder - dumpInst(&b, i) - return b.String() -} - -func bw(b *strings.Builder, args ...string) { - for _, s := range args { - b.WriteString(s) - } -} - -func dumpProg(b *strings.Builder, p *Prog) { - for j := range p.Inst { - i := &p.Inst[j] - pc := strconv.Itoa(j) - if len(pc) < 3 { - b.WriteString(" "[len(pc):]) - } - if j == p.Start { - pc += "*" - } - bw(b, pc, "\t") - dumpInst(b, i) - bw(b, "\n") - } -} - -func u32(i uint32) string { - return strconv.FormatUint(uint64(i), 10) -} - -func dumpInst(b *strings.Builder, i *Inst) { - switch i.Op { - case InstAlt: - bw(b, "alt -> ", u32(i.Out), ", ", u32(i.Arg)) - case InstAltMatch: - bw(b, "altmatch -> ", u32(i.Out), ", ", u32(i.Arg)) - case InstCapture: - bw(b, "cap ", u32(i.Arg), " -> ", u32(i.Out)) - case InstEmptyWidth: - bw(b, "empty ", u32(i.Arg), " -> ", u32(i.Out)) - case InstMatch: - bw(b, "match") - case InstFail: - bw(b, "fail") - case InstNop: - bw(b, "nop -> ", u32(i.Out)) - case InstRune: - if i.Rune == nil { - // shouldn't happen - bw(b, "rune ") - } - bw(b, "rune ", strconv.QuoteToASCII(string(i.Rune))) - if Flags(i.Arg)&FoldCase != 0 { - bw(b, "/i") - } - bw(b, " -> ", u32(i.Out)) - case InstRune1: - bw(b, "rune1 ", strconv.QuoteToASCII(string(i.Rune)), " -> ", u32(i.Out)) - case InstRuneAny: - bw(b, "any -> ", u32(i.Out)) - case InstRuneAnyNotNL: - bw(b, "anynotnl -> ", u32(i.Out)) - } -} diff --git a/vendor/github.com/grafana/regexp/syntax/regexp.go b/vendor/github.com/grafana/regexp/syntax/regexp.go deleted file mode 100644 index 8ad3653a..00000000 --- a/vendor/github.com/grafana/regexp/syntax/regexp.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// Note to implementers: -// In this package, re is always a *Regexp and r is always a rune. - -import ( - "slices" - "strconv" - "strings" - "unicode" -) - -// A Regexp is a node in a regular expression syntax tree. -type Regexp struct { - Op Op // operator - Flags Flags - Sub []*Regexp // subexpressions, if any - Sub0 [1]*Regexp // storage for short Sub - Rune []rune // matched runes, for OpLiteral, OpCharClass - Rune0 [2]rune // storage for short Rune - Min, Max int // min, max for OpRepeat - Cap int // capturing index, for OpCapture - Name string // capturing name, for OpCapture -} - -//go:generate stringer -type Op -trimprefix Op - -// An Op is a single regular expression operator. -type Op uint8 - -// Operators are listed in precedence order, tightest binding to weakest. -// Character class operators are listed simplest to most complex -// (OpLiteral, OpCharClass, OpAnyCharNotNL, OpAnyChar). 
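// Example, not part of the vendored source: parsing "ab|cd" (for instance via
// the package-level Parse with the Perl flags) produces an OpAlternate node
// whose two subexpressions are the OpLiteral nodes "ab" and "cd".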
- -const ( - OpNoMatch Op = 1 + iota // matches no strings - OpEmptyMatch // matches empty string - OpLiteral // matches Runes sequence - OpCharClass // matches Runes interpreted as range pair list - OpAnyCharNotNL // matches any character except newline - OpAnyChar // matches any character - OpBeginLine // matches empty string at beginning of line - OpEndLine // matches empty string at end of line - OpBeginText // matches empty string at beginning of text - OpEndText // matches empty string at end of text - OpWordBoundary // matches word boundary `\b` - OpNoWordBoundary // matches word non-boundary `\B` - OpCapture // capturing subexpression with index Cap, optional name Name - OpStar // matches Sub[0] zero or more times - OpPlus // matches Sub[0] one or more times - OpQuest // matches Sub[0] zero or one times - OpRepeat // matches Sub[0] at least Min times, at most Max (Max == -1 is no limit) - OpConcat // matches concatenation of Subs - OpAlternate // matches alternation of Subs -) - -const opPseudo Op = 128 // where pseudo-ops start - -// Equal reports whether x and y have identical structure. -func (x *Regexp) Equal(y *Regexp) bool { - if x == nil || y == nil { - return x == y - } - if x.Op != y.Op { - return false - } - switch x.Op { - case OpEndText: - // The parse flags remember whether this is \z or \Z. - if x.Flags&WasDollar != y.Flags&WasDollar { - return false - } - - case OpLiteral, OpCharClass: - return slices.Equal(x.Rune, y.Rune) - - case OpAlternate, OpConcat: - return slices.EqualFunc(x.Sub, y.Sub, func(a, b *Regexp) bool { return a.Equal(b) }) - - case OpStar, OpPlus, OpQuest: - if x.Flags&NonGreedy != y.Flags&NonGreedy || !x.Sub[0].Equal(y.Sub[0]) { - return false - } - - case OpRepeat: - if x.Flags&NonGreedy != y.Flags&NonGreedy || x.Min != y.Min || x.Max != y.Max || !x.Sub[0].Equal(y.Sub[0]) { - return false - } - - case OpCapture: - if x.Cap != y.Cap || x.Name != y.Name || !x.Sub[0].Equal(y.Sub[0]) { - return false - } - } - return true -} - -// printFlags is a bit set indicating which flags (including non-capturing parens) to print around a regexp. -type printFlags uint8 - -const ( - flagI printFlags = 1 << iota // (?i: - flagM // (?m: - flagS // (?s: - flagOff // ) - flagPrec // (?: ) - negShift = 5 // flagI<") - case OpNoMatch: - b.WriteString(`[^\x00-\x{10FFFF}]`) - case OpEmptyMatch: - b.WriteString(`(?:)`) - case OpLiteral: - for _, r := range re.Rune { - escape(b, r, false) - } - case OpCharClass: - if len(re.Rune)%2 != 0 { - b.WriteString(`[invalid char class]`) - break - } - b.WriteRune('[') - if len(re.Rune) == 0 { - b.WriteString(`^\x00-\x{10FFFF}`) - } else if re.Rune[0] == 0 && re.Rune[len(re.Rune)-1] == unicode.MaxRune && len(re.Rune) > 2 { - // Contains 0 and MaxRune. Probably a negated class. - // Print the gaps. 
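// Illustration, not part of the vendored source: a class stored as
// []rune{0x0, 'a' - 1, 'a' + 1, unicode.MaxRune} matches everything except
// 'a', so the loop below prints only the gap and the output is [^a].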
- b.WriteRune('^') - for i := 1; i < len(re.Rune)-1; i += 2 { - lo, hi := re.Rune[i]+1, re.Rune[i+1]-1 - escape(b, lo, lo == '-') - if lo != hi { - if hi != lo+1 { - b.WriteRune('-') - } - escape(b, hi, hi == '-') - } - } - } else { - for i := 0; i < len(re.Rune); i += 2 { - lo, hi := re.Rune[i], re.Rune[i+1] - escape(b, lo, lo == '-') - if lo != hi { - if hi != lo+1 { - b.WriteRune('-') - } - escape(b, hi, hi == '-') - } - } - } - b.WriteRune(']') - case OpAnyCharNotNL, OpAnyChar: - b.WriteString(`.`) - case OpBeginLine: - b.WriteString(`^`) - case OpEndLine: - b.WriteString(`$`) - case OpBeginText: - b.WriteString(`\A`) - case OpEndText: - if re.Flags&WasDollar != 0 { - b.WriteString(`$`) - } else { - b.WriteString(`\z`) - } - case OpWordBoundary: - b.WriteString(`\b`) - case OpNoWordBoundary: - b.WriteString(`\B`) - case OpCapture: - if re.Name != "" { - b.WriteString(`(?P<`) - b.WriteString(re.Name) - b.WriteRune('>') - } else { - b.WriteRune('(') - } - if re.Sub[0].Op != OpEmptyMatch { - writeRegexp(b, re.Sub[0], flags[re.Sub[0]], flags) - } - b.WriteRune(')') - case OpStar, OpPlus, OpQuest, OpRepeat: - p := printFlags(0) - sub := re.Sub[0] - if sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 { - p = flagPrec - } - writeRegexp(b, sub, p, flags) - - switch re.Op { - case OpStar: - b.WriteRune('*') - case OpPlus: - b.WriteRune('+') - case OpQuest: - b.WriteRune('?') - case OpRepeat: - b.WriteRune('{') - b.WriteString(strconv.Itoa(re.Min)) - if re.Max != re.Min { - b.WriteRune(',') - if re.Max >= 0 { - b.WriteString(strconv.Itoa(re.Max)) - } - } - b.WriteRune('}') - } - if re.Flags&NonGreedy != 0 { - b.WriteRune('?') - } - case OpConcat: - for _, sub := range re.Sub { - p := printFlags(0) - if sub.Op == OpAlternate { - p = flagPrec - } - writeRegexp(b, sub, p, flags) - } - case OpAlternate: - for i, sub := range re.Sub { - if i > 0 { - b.WriteRune('|') - } - writeRegexp(b, sub, 0, flags) - } - } -} - -func (re *Regexp) String() string { - var b strings.Builder - var flags map[*Regexp]printFlags - must, cant := calcFlags(re, &flags) - must |= (cant &^ flagI) << negShift - if must != 0 { - must |= flagOff - } - writeRegexp(&b, re, must, flags) - return b.String() -} - -const meta = `\.+*?()|[]{}^$` - -func escape(b *strings.Builder, r rune, force bool) { - if unicode.IsPrint(r) { - if strings.ContainsRune(meta, r) || force { - b.WriteRune('\\') - } - b.WriteRune(r) - return - } - - switch r { - case '\a': - b.WriteString(`\a`) - case '\f': - b.WriteString(`\f`) - case '\n': - b.WriteString(`\n`) - case '\r': - b.WriteString(`\r`) - case '\t': - b.WriteString(`\t`) - case '\v': - b.WriteString(`\v`) - default: - if r < 0x100 { - b.WriteString(`\x`) - s := strconv.FormatInt(int64(r), 16) - if len(s) == 1 { - b.WriteRune('0') - } - b.WriteString(s) - break - } - b.WriteString(`\x{`) - b.WriteString(strconv.FormatInt(int64(r), 16)) - b.WriteString(`}`) - } -} - -// MaxCap walks the regexp to find the maximum capture index. -func (re *Regexp) MaxCap() int { - m := 0 - if re.Op == OpCapture { - m = re.Cap - } - for _, sub := range re.Sub { - if n := sub.MaxCap(); m < n { - m = n - } - } - return m -} - -// CapNames walks the regexp to find the names of capturing groups. 
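// Example, not part of the vendored source: for the parsed regexp
// `(a)(?P<word>b+)`, MaxCap is 2 and CapNames returns []string{"", "", "word"};
// index 0 stands for the whole match and unnamed groups report "".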
-func (re *Regexp) CapNames() []string { - names := make([]string, re.MaxCap()+1) - re.capNames(names) - return names -} - -func (re *Regexp) capNames(names []string) { - if re.Op == OpCapture { - names[re.Cap] = re.Name - } - for _, sub := range re.Sub { - sub.capNames(names) - } -} diff --git a/vendor/github.com/grafana/regexp/syntax/simplify.go b/vendor/github.com/grafana/regexp/syntax/simplify.go deleted file mode 100644 index e4393251..00000000 --- a/vendor/github.com/grafana/regexp/syntax/simplify.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// Simplify returns a regexp equivalent to re but without counted repetitions -// and with various other simplifications, such as rewriting /(?:a+)+/ to /a+/. -// The resulting regexp will execute correctly but its string representation -// will not produce the same parse tree, because capturing parentheses -// may have been duplicated or removed. For example, the simplified form -// for /(x){1,2}/ is /(x)(x)?/ but both parentheses capture as $1. -// The returned regexp may share structure with or be the original. -func (re *Regexp) Simplify() *Regexp { - if re == nil { - return nil - } - switch re.Op { - case OpCapture, OpConcat, OpAlternate: - // Simplify children, building new Regexp if children change. - nre := re - for i, sub := range re.Sub { - nsub := sub.Simplify() - if nre == re && nsub != sub { - // Start a copy. - nre = new(Regexp) - *nre = *re - nre.Rune = nil - nre.Sub = append(nre.Sub0[:0], re.Sub[:i]...) - } - if nre != re { - nre.Sub = append(nre.Sub, nsub) - } - } - return nre - - case OpStar, OpPlus, OpQuest: - sub := re.Sub[0].Simplify() - return simplify1(re.Op, re.Flags, sub, re) - - case OpRepeat: - // Special special case: x{0} matches the empty string - // and doesn't even need to consider x. - if re.Min == 0 && re.Max == 0 { - return &Regexp{Op: OpEmptyMatch} - } - - // The fun begins. - sub := re.Sub[0].Simplify() - - // x{n,} means at least n matches of x. - if re.Max == -1 { - // Special case: x{0,} is x*. - if re.Min == 0 { - return simplify1(OpStar, re.Flags, sub, nil) - } - - // Special case: x{1,} is x+. - if re.Min == 1 { - return simplify1(OpPlus, re.Flags, sub, nil) - } - - // General case: x{4,} is xxxx+. - nre := &Regexp{Op: OpConcat} - nre.Sub = nre.Sub0[:0] - for i := 0; i < re.Min-1; i++ { - nre.Sub = append(nre.Sub, sub) - } - nre.Sub = append(nre.Sub, simplify1(OpPlus, re.Flags, sub, nil)) - return nre - } - - // Special case x{0} handled above. - - // Special case: x{1} is just x. - if re.Min == 1 && re.Max == 1 { - return sub - } - - // General case: x{n,m} means n copies of x and m copies of x? - // The machine will do less work if we nest the final m copies, - // so that x{2,5} = xx(x(x(x)?)?)? - - // Build leading prefix: xx. - var prefix *Regexp - if re.Min > 0 { - prefix = &Regexp{Op: OpConcat} - prefix.Sub = prefix.Sub0[:0] - for i := 0; i < re.Min; i++ { - prefix.Sub = append(prefix.Sub, sub) - } - } - - // Build and attach suffix: (x(x(x)?)?)? 
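// Additional worked case, not part of the vendored source: for x{0,2} the
// prefix is empty (Min == 0), so the nested suffix built below is the whole
// result: (x(x)?)?.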
- if re.Max > re.Min { - suffix := simplify1(OpQuest, re.Flags, sub, nil) - for i := re.Min + 1; i < re.Max; i++ { - nre2 := &Regexp{Op: OpConcat} - nre2.Sub = append(nre2.Sub0[:0], sub, suffix) - suffix = simplify1(OpQuest, re.Flags, nre2, nil) - } - if prefix == nil { - return suffix - } - prefix.Sub = append(prefix.Sub, suffix) - } - if prefix != nil { - return prefix - } - - // Some degenerate case like min > max or min < max < 0. - // Handle as impossible match. - return &Regexp{Op: OpNoMatch} - } - - return re -} - -// simplify1 implements Simplify for the unary OpStar, -// OpPlus, and OpQuest operators. It returns the simple regexp -// equivalent to -// -// Regexp{Op: op, Flags: flags, Sub: {sub}} -// -// under the assumption that sub is already simple, and -// without first allocating that structure. If the regexp -// to be returned turns out to be equivalent to re, simplify1 -// returns re instead. -// -// simplify1 is factored out of Simplify because the implementation -// for other operators generates these unary expressions. -// Letting them call simplify1 makes sure the expressions they -// generate are simple. -func simplify1(op Op, flags Flags, sub, re *Regexp) *Regexp { - // Special case: repeat the empty string as much as - // you want, but it's still the empty string. - if sub.Op == OpEmptyMatch { - return sub - } - // The operators are idempotent if the flags match. - if op == sub.Op && flags&NonGreedy == sub.Flags&NonGreedy { - return sub - } - if re != nil && re.Op == op && re.Flags&NonGreedy == flags&NonGreedy && sub == re.Sub[0] { - return re - } - - re = &Regexp{Op: op, Flags: flags} - re.Sub = append(re.Sub0[:0], sub) - return re -} diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 0a894979..afc55af9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,60 @@ +## 2.20.2 + +Require Go 1.22+ + +### Maintenance +- bump go to v1.22 [a671816] + +## 2.20.1 + +### Fixes +- make BeSpecEvent duration matcher more forgiving [d6f9640] + +## 2.20.0 + +### Features +- Add buildvcs flag [be5ab95] + +### Maintenance +- Add update-deps to makefile [d303d14] +- bump all dependencies [7a50221] + +## 2.19.1 + +### Fixes +- update supported platforms for race conditions [63c8c30] +- [build] Allow custom name for binaries. [ff41e27] + +### Maintenance +- bump gomega [76f4e0c] +- Bump rexml from 3.2.6 to 3.2.8 in /docs (#1417) [b69c00d] +- Bump golang.org/x/sys from 0.20.0 to 0.21.0 (#1425) [f097741] + +## 2.19.0 + +### Features + +[Label Sets](https://onsi.github.io/ginkgo/#label-sets) allow for more expressive and flexible label filtering. 
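As an illustrative sketch, not part of the upstream changelog: labels of the form `key:value` contribute to the label set for `key`, and the set operations added to `types/label_filter.go` in this release (containsAny, containsAll, consistsOf, isSubsetOf, isEmpty) can be applied to that set from a `--label-filter` query. The exact query syntax is described in the linked Label Sets docs; the suite and spec names below are hypothetical.

```go
package books_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
)

func TestBooks(t *testing.T) {
	// Bootstraps the suite so the spec below can run.
	RunSpecs(t, "Books Suite")
}

// "feature:checkout" and "owner:payments" contribute to the label sets
// feature={checkout} and owner={payments} for this spec, which a label-set
// filter query can then match against.
var _ = Describe("checkout", Label("feature:checkout"), func() {
	It("charges the card", Label("owner:payments"), func() {})
})
```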
+ +## 2.18.0 + +### Features +- Add --slience-skips and --force-newlines [f010b65] +- fail when no tests were run and --fail-on-empty was set [d80eebe] + +### Fixes +- Fix table entry context edge case [42013d6] + +### Maintenance +- Bump golang.org/x/tools from 0.20.0 to 0.21.0 (#1406) [fcf1fd7] +- Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 (#1399) [8bb14fd] +- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#1407) [04bfad7] + +## 2.17.3 + +### Fixes +`ginkgo watch` now ignores hidden files [bde6e00] + ## 2.17.2 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md index 1da92fe7..80de566a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md +++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md @@ -6,8 +6,10 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp - Ensure adequate test coverage: - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder). - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. -- Make sure all the tests succeed via `ginkgo -r -p` -- Vet your changes via `go vet ./...` -- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. +- Run `make` or: + - Install ginkgo locally via `go install ./...` + - Make sure all the tests succeed via `ginkgo -r -p` + - Vet your changes via `go vet ./...` +- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle && bundle exec jekyll serve` in the `docs` directory to preview your changes. -Thanks for supporting Ginkgo! \ No newline at end of file +Thanks for supporting Ginkgo! diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile new file mode 100644 index 00000000..06dff97c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/Makefile @@ -0,0 +1,16 @@ +# default task since it's first +.PHONY: all +all: vet test + +.PHONY: test +test: + go run github.com/onsi/ginkgo/v2/ginkgo -r -p -randomize-all -keep-going + +.PHONY: vet +vet: + go vet ./... + +.PHONY: update-deps +update-deps: + go get -u ./... 
+ go mod tidy \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index 5db5d1a7..fd172608 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -2,6 +2,8 @@ package build import ( "fmt" + "os" + "path" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" @@ -53,7 +55,18 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go if suite.State.Is(internal.TestSuiteStateFailedToCompile) { fmt.Println(suite.CompilationError.Error()) } else { - fmt.Printf("Compiled %s.test\n", suite.PackageName) + if len(goFlagsConfig.O) == 0 { + goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test") + } else { + stat, err := os.Stat(goFlagsConfig.O) + if err != nil { + panic(err) + } + if stat.IsDir() { + goFlagsConfig.O += "/" + suite.PackageName + ".test" + } + } + fmt.Printf("Compiled %s\n", goFlagsConfig.O) } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go index 86da7340..48827cc5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -25,6 +25,18 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite return suite } + if len(goFlagsConfig.O) > 0 { + userDefinedPath, err := filepath.Abs(goFlagsConfig.O) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error()) + return suite + } + path = userDefinedPath + } + + goFlagsConfig.O = path + ginkgoInvocationPath, _ := os.Getwd() ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) packagePath := suite.AbsPath() @@ -34,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) return suite } - args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath) if err != nil { suite.State = TestSuiteStateFailedToCompile suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go index 17d052bd..0e6ae1f2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strings" "time" ) @@ -79,6 +80,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } + if isHiddenFile(info) { + continue + } + if goTestRegExp.MatchString(info.Name()) { testHash += p.hashForFileInfo(info) if info.ModTime().After(testModifiedTime) { @@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti return } +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") +} + func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { return 
fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index a994ee3d..a3c9e6bf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -489,10 +489,15 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx])) } - if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending { + if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set") suite.report.SuiteSucceeded = false } + + if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set") + suite.report.SuiteSucceeded = false + } } if ranBeforeSuite { diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 98097337..48073048 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -202,6 +202,11 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel + //should we completely omit this spec? + if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips { + return + } + header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { header = fmt.Sprintf("[%s]", report.LeafNodeType) @@ -278,9 +283,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { } } - // If we have no content to show, jsut emit the header and return + // If we have no content to show, just emit the header and return if !reportHasContent { r.emit(r.f(highlightColor + header + "{{/}}")) + if r.conf.ForceNewlines { + r.emit("\n") + } return } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 2a3215b5..562e0f62 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -177,6 +177,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index a3aef821..c7de7a8b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -269,11 +269,15 @@ func generateTable(description string, isSubtree bool, args ...interface{}) { internalNodeArgs = append(internalNodeArgs, entry.decorations...) hasContext := false - if internalBodyType.NumIn() > 0. 
{ + if internalBodyType.NumIn() > 0 { if internalBodyType.In(0).Implements(specContextType) { hasContext = true - } else if internalBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + } else if internalBodyType.In(0).Implements(contextType) { hasContext = true + if len(entry.parameters) > 0 && reflect.TypeOf(entry.parameters[0]) != nil && reflect.TypeOf(entry.parameters[0]).Implements(contextType) { + // we allow you to pass in a non-nil context + hasContext = false + } } } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index cef273ee..97a049e0 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -25,6 +25,7 @@ type SuiteConfig struct { SkipFiles []string LabelFilter string FailOnPending bool + FailOnEmpty bool FailFast bool FlakeAttempts int MustPassRepeatedly int @@ -90,6 +91,8 @@ type ReporterConfig struct { FullTrace bool ShowNodeEvents bool GithubOutput bool + SilenceSkips bool + ForceNewlines bool JSONReport string JUnitReport string @@ -199,6 +202,7 @@ type GoFlagsConfig struct { A bool ASMFlags string BuildMode string + BuildVCS bool Compiler string GCCGoFlags string GCFlags string @@ -216,6 +220,7 @@ type GoFlagsConfig struct { ToolExec string Work bool X bool + O string } func NewDefaultGoFlagsConfig() GoFlagsConfig { @@ -275,6 +280,8 @@ var SuiteConfigFlags = GinkgoFlags{ Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure", + Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."}, {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, @@ -334,6 +341,10 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", Usage: "If set, default reporter prints easier to manage output in Github Actions."}, + {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output", + Usage: "If set, default reporter will not print out skipped tests."}, + {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output", + Usage: "If set, default reporter will ensure a newline appears after each test."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, @@ -502,7 +513,7 @@ var GinkgoCLIWatchFlags = GinkgoFlags{ // GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI var GoBuildFlags = GinkgoFlags{ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", - Usage: "enable data race detection. 
Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."}, {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", @@ -518,6 +529,8 @@ var GoBuildFlags = GinkgoFlags{ Usage: "arguments to pass on each go tool asm invocation."}, {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", Usage: "build mode to use. See 'go help buildmode' for more."}, + {KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build", + Usage: "adds version control information."}, {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", @@ -552,6 +565,8 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, + {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", + Usage: "output binary path (including name)."}, } // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. 
These are passed to the compiled test binary by the ginkgo CLI @@ -605,7 +620,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { @@ -628,7 +643,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") } - args := []string{"test", "-c", "-o", destination, packageToBuild} + args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, map[string]interface{}{ diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index b0d3b651..7fdc8aa2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter { return func(labels []string) bool { return a(labels) || b(labels) } } +func labelSetFor(key string, labels []string) map[string]bool { + key = strings.ToLower(strings.TrimSpace(key)) + out := map[string]bool{} + for _, label := range labels { + components := strings.SplitN(label, ":", 2) + if len(components) < 2 { + continue + } + if key == strings.ToLower(strings.TrimSpace(components[0])) { + out[strings.ToLower(strings.TrimSpace(components[1]))] = true + } + } + + return out +} + +func isEmptyLabelSetAction(key string) LabelFilter { + return func(labels []string) bool { + return len(labelSetFor(key, labels)) == 0 + } +} + +func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if set[value] { + return true + } + } + return false + } +} + +func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + if len(set) != len(expectedValues) { + return false + } + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter { + expectedSet := map[string]bool{} + for _, value := range expectedValues { + expectedSet[value] = true + } + return func(labels []string) bool { + set := labelSetFor(key, labels) + for value := range set { + if !expectedSet[value] { + return false + } + } + return true + } +} + type lfToken uint const ( @@ -58,6 +135,9 @@ const ( lfTokenOr lfTokenRegexp lfTokenLabel + lfTokenSetKey + lfTokenSetOperation + lfTokenSetArgument lfTokenEOF ) @@ -71,6 +151,8 @@ func (l lfToken) Precedence() int { return 
2 case lfTokenNot: return 3 + case lfTokenSetOperation: + return 4 } return -1 } @@ -93,6 +175,12 @@ func (l lfToken) String() string { return "/regexp/" case lfTokenLabel: return "label" + case lfTokenSetKey: + return "set_key" + case lfTokenSetOperation: + return "set_operation" + case lfTokenSetArgument: + return "set_argument" case lfTokenEOF: return "EOF" } @@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) } return matchLabelRegexAction(re), nil + case lfTokenSetOperation: + tokenSetOperation := strings.ToLower(tn.value) + if tokenSetOperation == "isempty" { + return isEmptyLabelSetAction(tn.leftNode.value), nil + } + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value)) + } + + rawValues := strings.Split(tn.rightNode.value, ",") + values := make([]string, len(rawValues)) + for i := range rawValues { + values[i] = strings.ToLower(strings.TrimSpace(rawValues[i])) + if strings.ContainsAny(values[i], "&|!,()/") { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i])) + } else if values[i] == "" { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.") + } + } + switch tokenSetOperation { + case "containsany": + return containsAnyLabelSetAction(tn.leftNode.value, values), nil + case "containsall": + return containsAllLabelSetAction(tn.leftNode.value, values), nil + case "consistsof": + return consistsOfLabelSetAction(tn.leftNode.value, values), nil + case "issubsetof": + return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil + } } if tn.rightNode == nil { @@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string { return out } +var validSetOperations = map[string]string{ + "containsany": "containsAny", + "containsall": "containsAll", + "consistsof": "consistsOf", + "issubsetof": "isSubsetOf", + "isempty": "isEmpty", +} + func tokenize(input string) func() (*treeNode, error) { + lastToken := lfTokenInvalid + lastValue := "" runes, i := []rune(input), 0 peekIs := func(r rune) bool { @@ -233,6 +360,53 @@ func tokenize(input string) func() (*treeNode, error) { } node := &treeNode{location: i} + defer func() { + lastToken = node.token + lastValue = node.value + }() + + if lastToken == lfTokenSetKey { + //we should get a valid set operation next + value, n := consumeUntil(" )") + if validSetOperations[strings.ToLower(value)] == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value)) + } + i += n + node.token, node.value = lfTokenSetOperation, value + return node, nil + } + if lastToken == lfTokenSetOperation { + //we should get an argument next, if we aren't isempty + var arg = "" + origI := i + if runes[i] == '{' { + i += 1 + value, n := consumeUntil("}") + if i+n >= len(runes) { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?") + } + i += n + 1 + arg = value + } else { + value, n := consumeUntil("&|!,()/") + i += n + arg = strings.TrimSpace(value) + } + if strings.ToLower(lastValue) == "isempty" && arg != "" { + return &treeNode{}, 
GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg)) + } + if arg == "" && strings.ToLower(lastValue) != "isempty" { + if i < len(runes) && runes[i] == '/' { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.") + } else { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue)) + } + } + // note that we sent an empty SetArgument token if we are isempty + node.token, node.value = lfTokenSetArgument, arg + return node, nil + } + switch runes[i] { case '&': if !peekIs('&') { @@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) { i += n + 1 node.token, node.value = lfTokenRegexp, value default: - value, n := consumeUntil("&|!,()/") + value, n := consumeUntil("&|!,()/:") i += n + value = strings.TrimSpace(value) + + //are we the beginning of a set operation? + if i < len(runes) && runes[i] == ':' { + if peekIs(' ') { + if value == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.") + } + i += 1 + //we are the beginning of a set operation + node.token, node.value = lfTokenSetKey, value + return node, nil + } + additionalValue, n := consumeUntil("&|!,()/") + additionalValue = strings.TrimSpace(additionalValue) + if additionalValue == ":" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.") + } + i += n + value += additionalValue + } + + valueToCheckForSetOperation := strings.ToLower(value) + for setOperation := range validSetOperations { + idx := strings.Index(valueToCheckForSetOperation, " "+setOperation) + if idx > 0 { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation])) + } + } + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) } return node, nil @@ -307,7 +511,7 @@ LOOP: switch node.token { case lfTokenEOF: break LOOP - case lfTokenLabel, lfTokenRegexp: + case lfTokenLabel, lfTokenRegexp, lfTokenSetKey: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. 
You need an operator between them.") } @@ -326,6 +530,18 @@ LOOP: node.setLeftNode(nodeToStealFrom.rightNode) nodeToStealFrom.setRightNode(node) current = node + case lfTokenSetOperation: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value)) + } + node.setLeftNode(current.rightNode) + current.setRightNode(node) + current = node + case lfTokenSetArgument: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token)) + } + current.setRightNode(node) case lfTokenCloseGroup: firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() if firstUnmatchedOpenNode == nil { @@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { if strings.ContainsAny(out, "&|!,()/") { return "", GinkgoErrors.InvalidLabel(label, cl) } + if out[0] == ':' { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + if strings.Contains(out, ":") { + components := strings.SplitN(out, ":", 2) + if len(components) < 2 || components[1] == "" { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + } return out, nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 5dd0140c..6dfb25f2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.17.2" +const VERSION = "2.20.2" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 62af14ad..c6c34d65 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,25 @@ +## 1.34.1 + +### Maintenance +- Use slices from exp/slices to keep golang 1.20 compat [5e71dcd] + +## 1.34.0 + +### Features +- Add RoundTripper method to ghttp.Server [c549e0d] + +### Fixes +- fix incorrect handling of nil slices in HaveExactElements (fixes #771) [878940c] +- issue_765 - fixed bug in Hopcroft-Karp algorithm [ebadb67] + +### Maintenance +- bump ginkgo [8af2ece] +- Fix typo in docs [123a071] +- Bump github.com/onsi/ginkgo/v2 from 2.17.2 to 2.17.3 (#756) [0e69083] +- Bump google.golang.org/protobuf from 1.33.0 to 1.34.1 (#755) [2675796] +- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#754) [4160c0f] +- Bump github-pages from 230 to 231 in /docs (#748) [892c303] + ## 1.33.1 ### Fixes diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 9697d513..2546ccce 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.33.1" +const GOMEGA_VERSION = "1.34.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). 
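The label_filter.go hunk above adds set-operation filters over "key:value" labels: isEmpty, containsAny, containsAll, consistsOf, and isSubsetOf, written as "<key>: <operation> {value, value, ...}". A minimal sketch of how such a filter could be evaluated in Go, assuming the exported types.ParseLabelFilter entry point and using made-up label values:

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func main() {
	// Labels a spec might carry, e.g. via ginkgo.Label("os:linux", "os:darwin", "slow").
	specLabels := []string{"os:linux", "os:darwin", "slow"}

	// containsAny matches when the spec has at least one of the listed values for the key.
	// types.ParseLabelFilter is assumed here; LabelFilter itself is func([]string) bool.
	filter, err := types.ParseLabelFilter("os: containsAny {linux, windows}")
	if err != nil {
		panic(err)
	}
	fmt.Println(filter(specLabels)) // true: the spec carries "os:linux"

	// isEmpty takes no argument and matches specs with no values at all for the key.
	empty, err := types.ParseLabelFilter("cloud: isEmpty")
	if err != nil {
		panic(err)
	}
	fmt.Println(empty(specLabels)) // true: there are no "cloud:<value>" labels
}

The same filter expressions can also be passed to the ginkgo CLI via --label-filter.
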
diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go index dca5b944..5a236d7d 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go +++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go @@ -30,15 +30,18 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool lenMatchers := len(matchers) lenValues := len(values) + success = true for i := 0; i < lenMatchers || i < lenValues; i++ { if i >= lenMatchers { matcher.extraIndex = i + success = false continue } if i >= lenValues { matcher.missingIndex = i + success = false return } @@ -49,15 +52,17 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool index: i, failure: err.Error(), }) + success = false } else if !match { matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ index: i, failure: elemMatcher.FailureMessage(values[i]), }) + success = false } } - return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil + return success, nil } func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) { diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go index 1c54edd8..4339acc6 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -1,6 +1,8 @@ package bipartitegraph import ( + "golang.org/x/exp/slices" + . "github.com/onsi/gomega/matchers/support/goraph/edge" . "github.com/onsi/gomega/matchers/support/goraph/node" "github.com/onsi/gomega/matchers/support/goraph/util" @@ -157,6 +159,11 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [ if len(currentLayer) == 0 { return []NodeOrderedSet{} } + if done { // if last layer - into last layer must be only 'free' nodes + currentLayer = slices.DeleteFunc(currentLayer, func(in Node) bool { + return !matching.Free(in) + }) + } guideLayers = append(guideLayers, currentLayer) } diff --git a/vendor/github.com/prometheus/prometheus/LICENSE b/vendor/github.com/prometheus/prometheus/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/prometheus/prometheus/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/prometheus/NOTICE b/vendor/github.com/prometheus/prometheus/NOTICE deleted file mode 100644 index 8605c258..00000000 --- a/vendor/github.com/prometheus/prometheus/NOTICE +++ /dev/null @@ -1,108 +0,0 @@ -The Prometheus systems and service monitoring server -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. 
(https://soundcloud.com/). - - -The following components are included in this product: - -Bootstrap -https://getbootstrap.com -Copyright 2011-2014 Twitter, Inc. -Licensed under the MIT License - -bootstrap3-typeahead.js -https://github.com/bassjobsen/Bootstrap-3-Typeahead -Original written by @mdo and @fat -Copyright 2014 Bass Jobsen @bassjobsen -Licensed under the Apache License, Version 2.0 - -fuzzy -https://github.com/mattyork/fuzzy -Original written by @mattyork -Copyright 2012 Matt York -Licensed under the MIT License - -bootstrap-datetimepicker.js -https://github.com/Eonasdan/bootstrap-datetimepicker -Copyright 2015 Jonathan Peterson (@Eonasdan) -Licensed under the MIT License - -moment.js -https://github.com/moment/moment/ -Copyright JS Foundation and other contributors -Licensed under the MIT License - -Rickshaw -https://github.com/shutterstock/rickshaw -Copyright 2011-2014 by Shutterstock Images, LLC -See https://github.com/shutterstock/rickshaw/blob/master/LICENSE for license details - -mustache.js -https://github.com/janl/mustache.js -Copyright 2009 Chris Wanstrath (Ruby) -Copyright 2010-2014 Jan Lehnardt (JavaScript) -Copyright 2010-2015 The mustache.js community -Licensed under the MIT License - -jQuery -https://jquery.org -Copyright jQuery Foundation and other contributors -Licensed under the MIT License - -Protocol Buffers for Go with Gadgets -https://github.com/gogo/protobuf/ -Copyright (c) 2013, The GoGo Authors. -See source code for license details. - -Go support for leveled logs, analogous to -https://code.google.com/p/google-glog/ -Copyright 2013 Google Inc. -Licensed under the Apache License, Version 2.0 - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 - -DNS library in Go -https://miek.nl/2014/august/16/go-dns-package/ -Copyright 2009 The Go Authors, 2011 Miek Gieben -See https://github.com/miekg/dns/blob/master/LICENSE for license details. - -LevelDB key/value database in Go -https://github.com/syndtr/goleveldb -Copyright 2012 Suryandaru Triandana -See https://github.com/syndtr/goleveldb/blob/master/LICENSE for license details. - -gosnappy - a fork of code.google.com/p/snappy-go -https://github.com/syndtr/gosnappy -Copyright 2011 The Snappy-Go Authors -See https://github.com/syndtr/gosnappy/blob/master/LICENSE for license details. - -go-zookeeper - Native ZooKeeper client for Go -https://github.com/samuel/go-zookeeper -Copyright (c) 2013, Samuel Stauffer -See https://github.com/samuel/go-zookeeper/blob/master/LICENSE for license details. - -Time series compression algorithm from Facebook's Gorilla paper -https://github.com/dgryski/go-tsz -Copyright (c) 2015,2016 Damian Gryski -See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details. - -The Go programming language -https://go.dev/ -Copyright (c) 2009 The Go Authors -See https://go.dev/LICENSE for license details. - -The Codicon icon font from Microsoft -https://github.com/microsoft/vscode-codicons -Copyright (c) Microsoft Corporation and other contributors -See https://github.com/microsoft/vscode-codicons/blob/main/LICENSE for license details. - -We also use code from a large number of npm packages. 
For details, see: -- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package.json -- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package-lock.json -- The individual package licenses as copied from the node_modules directory can be found in - the npm_licenses.tar.bz2 archive in release tarballs and Docker images. diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go deleted file mode 100644 index 01514abf..00000000 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !stringlabels && !dedupelabels - -package labels - -import ( - "bytes" - "slices" - "strings" - - "github.com/cespare/xxhash/v2" -) - -// Labels is a sorted set of labels. Order has to be guaranteed upon -// instantiation. -type Labels []Label - -func (ls Labels) Len() int { return len(ls) } -func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } -func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name } - -// Bytes returns ls as a byte slice. -// It uses an byte invalid character as a separator and so should not be used for printing. -func (ls Labels) Bytes(buf []byte) []byte { - b := bytes.NewBuffer(buf[:0]) - b.WriteByte(labelSep) - for i, l := range ls { - if i > 0 { - b.WriteByte(seps[0]) - } - b.WriteString(l.Name) - b.WriteByte(seps[0]) - b.WriteString(l.Value) - } - return b.Bytes() -} - -// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. -// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. -func (ls Labels) MatchLabels(on bool, names ...string) Labels { - matchedLabels := Labels{} - - nameSet := make(map[string]struct{}, len(names)) - for _, n := range names { - nameSet[n] = struct{}{} - } - - for _, v := range ls { - if _, ok := nameSet[v.Name]; on == ok && (on || v.Name != MetricName) { - matchedLabels = append(matchedLabels, v) - } - } - - return matchedLabels -} - -// Hash returns a hash value for the label set. -// Note: the result is not guaranteed to be consistent across different runs of Prometheus. -func (ls Labels) Hash() uint64 { - // Use xxhash.Sum64(b) for fast path as it's faster. - b := make([]byte, 0, 1024) - for i, v := range ls { - if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { - // If labels entry is 1KB+ do not allocate whole entry. - h := xxhash.New() - _, _ = h.Write(b) - for _, v := range ls[i:] { - _, _ = h.WriteString(v.Name) - _, _ = h.Write(seps) - _, _ = h.WriteString(v.Value) - _, _ = h.Write(seps) - } - return h.Sum64() - } - - b = append(b, v.Name...) - b = append(b, seps[0]) - b = append(b, v.Value...) 
- b = append(b, seps[0]) - } - return xxhash.Sum64(b) -} - -// HashForLabels returns a hash value for the labels matching the provided names. -// 'names' have to be sorted in ascending order. -func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { - b = b[:0] - i, j := 0, 0 - for i < len(ls) && j < len(names) { - switch { - case names[j] < ls[i].Name: - j++ - case ls[i].Name < names[j]: - i++ - default: - b = append(b, ls[i].Name...) - b = append(b, seps[0]) - b = append(b, ls[i].Value...) - b = append(b, seps[0]) - i++ - j++ - } - } - return xxhash.Sum64(b), b -} - -// HashWithoutLabels returns a hash value for all labels except those matching -// the provided names. -// 'names' have to be sorted in ascending order. -func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { - b = b[:0] - j := 0 - for i := range ls { - for j < len(names) && names[j] < ls[i].Name { - j++ - } - if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) { - continue - } - b = append(b, ls[i].Name...) - b = append(b, seps[0]) - b = append(b, ls[i].Value...) - b = append(b, seps[0]) - } - return xxhash.Sum64(b), b -} - -// BytesWithLabels is just as Bytes(), but only for labels matching names. -// 'names' have to be sorted in ascending order. -func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { - b := bytes.NewBuffer(buf[:0]) - b.WriteByte(labelSep) - i, j := 0, 0 - for i < len(ls) && j < len(names) { - switch { - case names[j] < ls[i].Name: - j++ - case ls[i].Name < names[j]: - i++ - default: - if b.Len() > 1 { - b.WriteByte(seps[0]) - } - b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) - b.WriteString(ls[i].Value) - i++ - j++ - } - } - return b.Bytes() -} - -// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. -// 'names' have to be sorted in ascending order. -func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { - b := bytes.NewBuffer(buf[:0]) - b.WriteByte(labelSep) - j := 0 - for i := range ls { - for j < len(names) && names[j] < ls[i].Name { - j++ - } - if j < len(names) && ls[i].Name == names[j] { - continue - } - if b.Len() > 1 { - b.WriteByte(seps[0]) - } - b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) - b.WriteString(ls[i].Value) - } - return b.Bytes() -} - -// Copy returns a copy of the labels. -func (ls Labels) Copy() Labels { - res := make(Labels, len(ls)) - copy(res, ls) - return res -} - -// Get returns the value for the label with the given name. -// Returns an empty string if the label doesn't exist. -func (ls Labels) Get(name string) string { - for _, l := range ls { - if l.Name == name { - return l.Value - } - } - return "" -} - -// Has returns true if the label with the given name is present. -func (ls Labels) Has(name string) bool { - for _, l := range ls { - if l.Name == name { - return true - } - } - return false -} - -// HasDuplicateLabelNames returns whether ls has duplicate label names. -// It assumes that the labelset is sorted. -func (ls Labels) HasDuplicateLabelNames() (string, bool) { - for i, l := range ls { - if i == 0 { - continue - } - if l.Name == ls[i-1].Name { - return l.Name, true - } - } - return "", false -} - -// WithoutEmpty returns the labelset without empty labels. -// May return the same labelset. -func (ls Labels) WithoutEmpty() Labels { - for _, v := range ls { - if v.Value != "" { - continue - } - // Do not copy the slice until it's necessary. 
- els := make(Labels, 0, len(ls)-1) - for _, v := range ls { - if v.Value != "" { - els = append(els, v) - } - } - return els - } - return ls -} - -// Equal returns whether the two label sets are equal. -func Equal(ls, o Labels) bool { - if len(ls) != len(o) { - return false - } - for i, l := range ls { - if l != o[i] { - return false - } - } - return true -} - -// EmptyLabels returns n empty Labels value, for convenience. -func EmptyLabels() Labels { - return Labels{} -} - -// New returns a sorted Labels from the given labels. -// The caller has to guarantee that all label names are unique. -func New(ls ...Label) Labels { - set := make(Labels, 0, len(ls)) - set = append(set, ls...) - slices.SortFunc(set, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) - - return set -} - -// FromStrings creates new labels from pairs of strings. -func FromStrings(ss ...string) Labels { - if len(ss)%2 != 0 { - panic("invalid number of strings") - } - res := make(Labels, 0, len(ss)/2) - for i := 0; i < len(ss); i += 2 { - res = append(res, Label{Name: ss[i], Value: ss[i+1]}) - } - - slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) - return res -} - -// Compare compares the two label sets. -// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. -func Compare(a, b Labels) int { - l := len(a) - if len(b) < l { - l = len(b) - } - - for i := 0; i < l; i++ { - if a[i].Name != b[i].Name { - if a[i].Name < b[i].Name { - return -1 - } - return 1 - } - if a[i].Value != b[i].Value { - if a[i].Value < b[i].Value { - return -1 - } - return 1 - } - } - // If all labels so far were in common, the set with fewer labels comes first. - return len(a) - len(b) -} - -// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. -func (ls *Labels) CopyFrom(b Labels) { - (*ls) = append((*ls)[:0], b...) -} - -// IsEmpty returns true if ls represents an empty set of labels. -func (ls Labels) IsEmpty() bool { - return len(ls) == 0 -} - -// Range calls f on each label. -func (ls Labels) Range(f func(l Label)) { - for _, l := range ls { - f(l) - } -} - -// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. -func (ls Labels) Validate(f func(l Label) error) error { - for _, l := range ls { - if err := f(l); err != nil { - return err - } - } - return nil -} - -// DropMetricName returns Labels with "__name__" removed. -func (ls Labels) DropMetricName() Labels { - for i, l := range ls { - if l.Name == MetricName { - if i == 0 { // Make common case fast with no allocations. - return ls[1:] - } - // Avoid modifying original Labels - use [:i:i] so that left slice would not - // have any spare capacity and append would have to allocate a new slice for the result. - return append(ls[:i:i], ls[i+1:]...) - } - } - return ls -} - -// InternStrings calls intern on every string value inside ls, replacing them with what it returns. -func (ls *Labels) InternStrings(intern func(string) string) { - for i, l := range *ls { - (*ls)[i].Name = intern(l.Name) - (*ls)[i].Value = intern(l.Value) - } -} - -// ReleaseStrings calls release on every string value inside ls. -func (ls Labels) ReleaseStrings(release func(string)) { - for _, l := range ls { - release(l.Name) - release(l.Value) - } -} - -// Builder allows modifying Labels. -type Builder struct { - base Labels - del []string - add []Label -} - -// Reset clears all current state for the builder. 
-func (b *Builder) Reset(base Labels) { - b.base = base - b.del = b.del[:0] - b.add = b.add[:0] - b.base.Range(func(l Label) { - if l.Value == "" { - b.del = append(b.del, l.Name) - } - }) -} - -// Labels returns the labels from the builder. -// If no modifications were made, the original labels are returned. -func (b *Builder) Labels() Labels { - if len(b.del) == 0 && len(b.add) == 0 { - return b.base - } - - expectedSize := len(b.base) + len(b.add) - len(b.del) - if expectedSize < 1 { - expectedSize = 1 - } - res := make(Labels, 0, expectedSize) - for _, l := range b.base { - if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) { - continue - } - res = append(res, l) - } - if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it. - res = append(res, b.add...) - slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) - } - return res -} - -// ScratchBuilder allows efficient construction of a Labels from scratch. -type ScratchBuilder struct { - add Labels -} - -// Symbol-table is no-op, just for api parity with dedupelabels. -type SymbolTable struct{} - -func NewSymbolTable() *SymbolTable { return nil } - -func (t *SymbolTable) Len() int { return 0 } - -// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. -func NewScratchBuilder(n int) ScratchBuilder { - return ScratchBuilder{add: make([]Label, 0, n)} -} - -// NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels. -func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder { - return NewBuilder(EmptyLabels()) -} - -// NewScratchBuilderWithSymbolTable creates a ScratchBuilder, for api parity with dedupelabels. -func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { - return NewScratchBuilder(n) -} - -func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { - // no-op -} - -func (b *ScratchBuilder) Reset() { - b.add = b.add[:0] -} - -// Add a name/value pair. -// Note if you Add the same name twice you will get a duplicate label, which is invalid. -func (b *ScratchBuilder) Add(name, value string) { - b.add = append(b.add, Label{Name: name, Value: value}) -} - -// Add a name/value pair, using []byte instead of string. -// The '-tags stringlabels' version of this function is unsafe, hence the name. -// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. -func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { - b.add = append(b.add, Label{Name: string(name), Value: string(value)}) -} - -// Sort the labels added so far by name. -func (b *ScratchBuilder) Sort() { - slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) -} - -// Assign is for when you already have a Labels which you want this ScratchBuilder to return. -func (b *ScratchBuilder) Assign(ls Labels) { - b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. -} - -// Return the name/value pairs added so far as a Labels object. -// Note: if you want them sorted, call Sort() first. -func (b *ScratchBuilder) Labels() Labels { - // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. - return append([]Label{}, b.add...) -} - -// Write the newly-built Labels out to ls. -// Callers must ensure that there are no other references to ls, or any strings fetched from it. -func (b *ScratchBuilder) Overwrite(ls *Labels) { - *ls = append((*ls)[:0], b.add...) 
-} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go deleted file mode 100644 index f46321c9..00000000 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package labels - -import ( - "bytes" - "encoding/json" - "slices" - "strconv" - - "github.com/prometheus/common/model" -) - -const ( - MetricName = "__name__" - AlertName = "alertname" - BucketLabel = "le" - InstanceName = "instance" - - labelSep = '\xfe' -) - -var seps = []byte{'\xff'} - -// Label is a key/value pair of strings. -type Label struct { - Name, Value string -} - -func (ls Labels) String() string { - var bytea [1024]byte // On stack to avoid memory allocation while building the output. - b := bytes.NewBuffer(bytea[:0]) - - b.WriteByte('{') - i := 0 - ls.Range(func(l Label) { - if i > 0 { - b.WriteByte(',') - b.WriteByte(' ') - } - b.WriteString(l.Name) - b.WriteByte('=') - b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Value)) - i++ - }) - b.WriteByte('}') - return b.String() -} - -// MarshalJSON implements json.Marshaler. -func (ls Labels) MarshalJSON() ([]byte, error) { - return json.Marshal(ls.Map()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (ls *Labels) UnmarshalJSON(b []byte) error { - var m map[string]string - - if err := json.Unmarshal(b, &m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - -// MarshalYAML implements yaml.Marshaler. -func (ls Labels) MarshalYAML() (interface{}, error) { - return ls.Map(), nil -} - -// UnmarshalYAML implements yaml.Unmarshaler. -func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { - var m map[string]string - - if err := unmarshal(&m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - -// IsValid checks if the metric name or label names are valid. -func (ls Labels) IsValid() bool { - err := ls.Validate(func(l Label) error { - if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { - return strconv.ErrSyntax - } - if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { - return strconv.ErrSyntax - } - return nil - }) - return err == nil -} - -// Map returns a string map of the labels. -func (ls Labels) Map() map[string]string { - m := make(map[string]string) - ls.Range(func(l Label) { - m[l.Name] = l.Value - }) - return m -} - -// FromMap returns new sorted Labels from the given map. -func FromMap(m map[string]string) Labels { - l := make([]Label, 0, len(m)) - for k, v := range m { - l = append(l, Label{Name: k, Value: v}) - } - return New(l...) -} - -// NewBuilder returns a new LabelsBuilder. -func NewBuilder(base Labels) *Builder { - b := &Builder{ - del: make([]string, 0, 5), - add: make([]Label, 0, 5), - } - b.Reset(base) - return b -} - -// Del deletes the label of the given name. 
-func (b *Builder) Del(ns ...string) *Builder { - for _, n := range ns { - for i, a := range b.add { - if a.Name == n { - b.add = append(b.add[:i], b.add[i+1:]...) - } - } - b.del = append(b.del, n) - } - return b -} - -// Keep removes all labels from the base except those with the given names. -func (b *Builder) Keep(ns ...string) *Builder { - b.base.Range(func(l Label) { - for _, n := range ns { - if l.Name == n { - return - } - } - b.del = append(b.del, l.Name) - }) - return b -} - -// Set the name/value pair as a label. A value of "" means delete that label. -func (b *Builder) Set(n, v string) *Builder { - if v == "" { - // Empty labels are the same as missing labels. - return b.Del(n) - } - for i, a := range b.add { - if a.Name == n { - b.add[i].Value = v - return b - } - } - b.add = append(b.add, Label{Name: n, Value: v}) - - return b -} - -func (b *Builder) Get(n string) string { - // Del() removes entries from .add but Set() does not remove from .del, so check .add first. - for _, a := range b.add { - if a.Name == n { - return a.Value - } - } - if slices.Contains(b.del, n) { - return "" - } - return b.base.Get(n) -} - -// Range calls f on each label in the Builder. -func (b *Builder) Range(f func(l Label)) { - // Stack-based arrays to avoid heap allocation in most cases. - var addStack [128]Label - var delStack [128]string - // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). - origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) - b.base.Range(func(l Label) { - if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { - f(l) - } - }) - for _, a := range origAdd { - f(a) - } -} - -func contains(s []Label, n string) bool { - for _, a := range s { - if a.Name == n { - return true - } - } - return false -} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go deleted file mode 100644 index dfc74aa3..00000000 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go +++ /dev/null @@ -1,807 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build dedupelabels - -package labels - -import ( - "bytes" - "slices" - "strings" - "sync" - "unsafe" - - "github.com/cespare/xxhash/v2" -) - -// Labels is implemented by a SymbolTable and string holding name/value -// pairs encoded as indexes into the table in varint encoding. -// Names are in alphabetical order. -type Labels struct { - syms *nameTable - data string -} - -// Split SymbolTable into the part used by Labels and the part used by Builder. Only the latter needs the map. - -// This part is used by Labels. All fields are immutable after construction. -type nameTable struct { - byNum []string // This slice header is never changed, even while we are building the symbol table. - symbolTable *SymbolTable // If we need to use it in a Builder. 
-} - -// SymbolTable is used to map strings into numbers so they can be packed together. -type SymbolTable struct { - mx sync.Mutex - *nameTable - nextNum int - byName map[string]int -} - -const defaultSymbolTableSize = 1024 - -func NewSymbolTable() *SymbolTable { - t := &SymbolTable{ - nameTable: &nameTable{byNum: make([]string, defaultSymbolTableSize)}, - byName: make(map[string]int, defaultSymbolTableSize), - } - t.nameTable.symbolTable = t - return t -} - -func (t *SymbolTable) Len() int { - t.mx.Lock() - defer t.mx.Unlock() - return len(t.byName) -} - -// ToNum maps a string to an integer, adding the string to the table if it is not already there. -// Note: copies the string before adding, in case the caller passed part of -// a buffer that should not be kept alive by this SymbolTable. -func (t *SymbolTable) ToNum(name string) int { - t.mx.Lock() - defer t.mx.Unlock() - return t.toNumUnlocked(name) -} - -func (t *SymbolTable) toNumUnlocked(name string) int { - if i, found := t.byName[name]; found { - return i - } - i := t.nextNum - if t.nextNum == cap(t.byNum) { - // Name table is full; copy to a new one. Don't touch the existing slice, as nameTable is immutable after construction. - newSlice := make([]string, cap(t.byNum)*2) - copy(newSlice, t.byNum) - t.nameTable = &nameTable{byNum: newSlice, symbolTable: t} - } - name = strings.Clone(name) - t.byNum[i] = name - t.byName[name] = i - t.nextNum++ - return i -} - -func (t *SymbolTable) checkNum(name string) (int, bool) { - t.mx.Lock() - defer t.mx.Unlock() - i, bool := t.byName[name] - return i, bool -} - -// ToName maps an integer to a string. -func (t *nameTable) ToName(num int) string { - return t.byNum[num] -} - -func decodeVarint(data string, index int) (int, int) { - // Fast-path for common case of a single byte, value 0..127. - b := data[index] - index++ - if b < 0x80 { - return int(b), index - } - value := int(b & 0x7F) - for shift := uint(7); ; shift += 7 { - // Just panic if we go of the end of data, since all Labels strings are constructed internally and - // malformed data indicates a bug, or memory corruption. - b := data[index] - index++ - value |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - return value, index -} - -func decodeString(t *nameTable, data string, index int) (string, int) { - var num int - num, index = decodeVarint(data, index) - return t.ToName(num), index -} - -// Bytes returns ls as a byte slice. -// It uses non-printing characters and so should not be used for printing. -func (ls Labels) Bytes(buf []byte) []byte { - b := bytes.NewBuffer(buf[:0]) - for i := 0; i < len(ls.data); { - if i > 0 { - b.WriteByte(seps[0]) - } - var name, value string - name, i = decodeString(ls.syms, ls.data, i) - value, i = decodeString(ls.syms, ls.data, i) - b.WriteString(name) - b.WriteByte(seps[0]) - b.WriteString(value) - } - return b.Bytes() -} - -// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted. -func (ls Labels) IsZero() bool { - return len(ls.data) == 0 -} - -// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. -// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. -// TODO: This is only used in printing an error message -func (ls Labels) MatchLabels(on bool, names ...string) Labels { - b := NewBuilder(ls) - if on { - b.Keep(names...) - } else { - b.Del(MetricName) - b.Del(names...) 
- } - return b.Labels() -} - -// Hash returns a hash value for the label set. -// Note: the result is not guaranteed to be consistent across different runs of Prometheus. -func (ls Labels) Hash() uint64 { - // Use xxhash.Sum64(b) for fast path as it's faster. - b := make([]byte, 0, 1024) - for pos := 0; pos < len(ls.data); { - name, newPos := decodeString(ls.syms, ls.data, pos) - value, newPos := decodeString(ls.syms, ls.data, newPos) - if len(b)+len(name)+len(value)+2 >= cap(b) { - // If labels entry is 1KB+, hash the rest of them via Write(). - h := xxhash.New() - _, _ = h.Write(b) - for pos < len(ls.data) { - name, pos = decodeString(ls.syms, ls.data, pos) - value, pos = decodeString(ls.syms, ls.data, pos) - _, _ = h.WriteString(name) - _, _ = h.Write(seps) - _, _ = h.WriteString(value) - _, _ = h.Write(seps) - } - return h.Sum64() - } - - b = append(b, name...) - b = append(b, seps[0]) - b = append(b, value...) - b = append(b, seps[0]) - pos = newPos - } - return xxhash.Sum64(b) -} - -// HashForLabels returns a hash value for the labels matching the provided names. -// 'names' have to be sorted in ascending order. -func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { - b = b[:0] - j := 0 - for i := 0; i < len(ls.data); { - var name, value string - name, i = decodeString(ls.syms, ls.data, i) - value, i = decodeString(ls.syms, ls.data, i) - for j < len(names) && names[j] < name { - j++ - } - if j == len(names) { - break - } - if name == names[j] { - b = append(b, name...) - b = append(b, seps[0]) - b = append(b, value...) - b = append(b, seps[0]) - } - } - - return xxhash.Sum64(b), b -} - -// HashWithoutLabels returns a hash value for all labels except those matching -// the provided names. -// 'names' have to be sorted in ascending order. -func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { - b = b[:0] - j := 0 - for i := 0; i < len(ls.data); { - var name, value string - name, i = decodeString(ls.syms, ls.data, i) - value, i = decodeString(ls.syms, ls.data, i) - for j < len(names) && names[j] < name { - j++ - } - if name == MetricName || (j < len(names) && name == names[j]) { - continue - } - b = append(b, name...) - b = append(b, seps[0]) - b = append(b, value...) - b = append(b, seps[0]) - } - return xxhash.Sum64(b), b -} - -// BytesWithLabels is just as Bytes(), but only for labels matching names. -// 'names' have to be sorted in ascending order. -func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { - b := bytes.NewBuffer(buf[:0]) - j := 0 - for pos := 0; pos < len(ls.data); { - lName, newPos := decodeString(ls.syms, ls.data, pos) - lValue, newPos := decodeString(ls.syms, ls.data, newPos) - for j < len(names) && names[j] < lName { - j++ - } - if j == len(names) { - break - } - if lName == names[j] { - if b.Len() > 1 { - b.WriteByte(seps[0]) - } - b.WriteString(lName) - b.WriteByte(seps[0]) - b.WriteString(lValue) - } - pos = newPos - } - return b.Bytes() -} - -// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. -// 'names' have to be sorted in ascending order. 
-func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { - b := bytes.NewBuffer(buf[:0]) - j := 0 - for pos := 0; pos < len(ls.data); { - lName, newPos := decodeString(ls.syms, ls.data, pos) - lValue, newPos := decodeString(ls.syms, ls.data, newPos) - for j < len(names) && names[j] < lName { - j++ - } - if j == len(names) || lName != names[j] { - if b.Len() > 1 { - b.WriteByte(seps[0]) - } - b.WriteString(lName) - b.WriteByte(seps[0]) - b.WriteString(lValue) - } - pos = newPos - } - return b.Bytes() -} - -// Copy returns a copy of the labels. -func (ls Labels) Copy() Labels { - return Labels{syms: ls.syms, data: strings.Clone(ls.data)} -} - -// Get returns the value for the label with the given name. -// Returns an empty string if the label doesn't exist. -func (ls Labels) Get(name string) string { - if name == "" { // Avoid crash in loop if someone asks for "". - return "" // Prometheus does not store blank label names. - } - for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.syms, ls.data, i) - if lName == name { - lValue, _ = decodeString(ls.syms, ls.data, i) - return lValue - } else if lName[0] > name[0] { // Stop looking if we've gone past. - break - } - _, i = decodeVarint(ls.data, i) - } - return "" -} - -// Has returns true if the label with the given name is present. -func (ls Labels) Has(name string) bool { - if name == "" { // Avoid crash in loop if someone asks for "". - return false // Prometheus does not store blank label names. - } - for i := 0; i < len(ls.data); { - var lName string - lName, i = decodeString(ls.syms, ls.data, i) - if lName == name { - return true - } else if lName[0] > name[0] { // Stop looking if we've gone past. - break - } - _, i = decodeVarint(ls.data, i) - } - return false -} - -// HasDuplicateLabelNames returns whether ls has duplicate label names. -// It assumes that the labelset is sorted. -func (ls Labels) HasDuplicateLabelNames() (string, bool) { - prevNum := -1 - for i := 0; i < len(ls.data); { - var lNum int - lNum, i = decodeVarint(ls.data, i) - _, i = decodeVarint(ls.data, i) - if lNum == prevNum { - return ls.syms.ToName(lNum), true - } - prevNum = lNum - } - return "", false -} - -// WithoutEmpty returns the labelset without empty labels. -// May return the same labelset. -func (ls Labels) WithoutEmpty() Labels { - if ls.IsEmpty() { - return ls - } - // Idea: have a constant symbol for blank, then we don't have to look it up. - blank, ok := ls.syms.symbolTable.checkNum("") - if !ok { // Symbol table has no entry for blank - none of the values can be blank. - return ls - } - for pos := 0; pos < len(ls.data); { - _, newPos := decodeVarint(ls.data, pos) - lValue, newPos := decodeVarint(ls.data, newPos) - if lValue != blank { - pos = newPos - continue - } - // Do not copy the slice until it's necessary. - // TODO: could optimise the case where all blanks are at the end. - // Note: we size the new buffer on the assumption there is exactly one blank value. - buf := make([]byte, pos, pos+(len(ls.data)-newPos)) - copy(buf, ls.data[:pos]) // copy the initial non-blank labels - pos = newPos // move past the first blank value - for pos < len(ls.data) { - var newPos int - _, newPos = decodeVarint(ls.data, pos) - lValue, newPos = decodeVarint(ls.data, newPos) - if lValue != blank { - buf = append(buf, ls.data[pos:newPos]...) - } - pos = newPos - } - return Labels{syms: ls.syms, data: yoloString(buf)} - } - return ls -} - -// Equal returns whether the two label sets are equal. 
-func Equal(a, b Labels) bool { - if a.syms == b.syms { - return a.data == b.data - } - - la, lb := len(a.data), len(b.data) - ia, ib := 0, 0 - for ia < la && ib < lb { - var aValue, bValue string - aValue, ia = decodeString(a.syms, a.data, ia) - bValue, ib = decodeString(b.syms, b.data, ib) - if aValue != bValue { - return false - } - } - if ia != la || ib != lb { - return false - } - return true -} - -// EmptyLabels returns an empty Labels value, for convenience. -func EmptyLabels() Labels { - return Labels{} -} - -func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) -} - -// New returns a sorted Labels from the given labels. -// The caller has to guarantee that all label names are unique. -// Note this function is not efficient; should not be used in performance-critical places. -func New(ls ...Label) Labels { - slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) - syms := NewSymbolTable() - var stackSpace [16]int - size, nums := mapLabelsToNumbers(syms, ls, stackSpace[:]) - buf := make([]byte, size) - marshalNumbersToSizedBuffer(nums, buf) - return Labels{syms: syms.nameTable, data: yoloString(buf)} -} - -// FromStrings creates new labels from pairs of strings. -func FromStrings(ss ...string) Labels { - if len(ss)%2 != 0 { - panic("invalid number of strings") - } - ls := make([]Label, 0, len(ss)/2) - for i := 0; i < len(ss); i += 2 { - ls = append(ls, Label{Name: ss[i], Value: ss[i+1]}) - } - - return New(ls...) -} - -// Compare compares the two label sets. -// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. -func Compare(a, b Labels) int { - la, lb := len(a.data), len(b.data) - ia, ib := 0, 0 - for ia < la && ib < lb { - var aName, bName string - aName, ia = decodeString(a.syms, a.data, ia) - bName, ib = decodeString(b.syms, b.data, ib) - if aName != bName { - if aName < bName { - return -1 - } - return 1 - } - var aValue, bValue string - aValue, ia = decodeString(a.syms, a.data, ia) - bValue, ib = decodeString(b.syms, b.data, ib) - if aValue != bValue { - if aValue < bValue { - return -1 - } - return 1 - } - } - // If all labels so far were in common, the set with fewer labels comes first. - return (la - ia) - (lb - ib) -} - -// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. -func (ls *Labels) CopyFrom(b Labels) { - *ls = b // Straightforward memberwise copy is all we need. -} - -// IsEmpty returns true if ls represents an empty set of labels. -func (ls Labels) IsEmpty() bool { - return len(ls.data) == 0 -} - -// Len returns the number of labels; it is relatively slow. -func (ls Labels) Len() int { - count := 0 - for i := 0; i < len(ls.data); { - _, i = decodeVarint(ls.data, i) - _, i = decodeVarint(ls.data, i) - count++ - } - return count -} - -// Range calls f on each label. -func (ls Labels) Range(f func(l Label)) { - for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.syms, ls.data, i) - lValue, i = decodeString(ls.syms, ls.data, i) - f(Label{Name: lName, Value: lValue}) - } -} - -// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. 
-func (ls Labels) Validate(f func(l Label) error) error { - for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.syms, ls.data, i) - lValue, i = decodeString(ls.syms, ls.data, i) - err := f(Label{Name: lName, Value: lValue}) - if err != nil { - return err - } - } - return nil -} - -// InternStrings calls intern on every string value inside ls, replacing them with what it returns. -func (ls *Labels) InternStrings(intern func(string) string) { - // TODO: remove these calls as there is nothing to do. -} - -// ReleaseStrings calls release on every string value inside ls. -func (ls Labels) ReleaseStrings(release func(string)) { - // TODO: remove these calls as there is nothing to do. -} - -// DropMetricName returns Labels with "__name__" removed. -func (ls Labels) DropMetricName() Labels { - for i := 0; i < len(ls.data); { - lName, i2 := decodeString(ls.syms, ls.data, i) - _, i2 = decodeVarint(ls.data, i2) - if lName == MetricName { - if i == 0 { // Make common case fast with no allocations. - ls.data = ls.data[i2:] - } else { - ls.data = ls.data[:i] + ls.data[i2:] - } - break - } else if lName[0] > MetricName[0] { // Stop looking if we've gone past. - break - } - i = i2 - } - return ls -} - -// Builder allows modifying Labels. -type Builder struct { - syms *SymbolTable - nums []int - base Labels - del []string - add []Label -} - -// NewBuilderWithSymbolTable returns a new LabelsBuilder not based on any labels, but with the SymbolTable. -func NewBuilderWithSymbolTable(s *SymbolTable) *Builder { - return &Builder{ - syms: s, - } -} - -// Reset clears all current state for the builder. -func (b *Builder) Reset(base Labels) { - if base.syms != nil { // If base has a symbol table, use that. - b.syms = base.syms.symbolTable - } else if b.syms == nil { // Or continue using previous symbol table in builder. - b.syms = NewSymbolTable() // Don't do this in performance-sensitive code. - } - - b.base = base - b.del = b.del[:0] - b.add = b.add[:0] - base.Range(func(l Label) { - if l.Value == "" { - b.del = append(b.del, l.Name) - } - }) -} - -// Labels returns the labels from the builder. -// If no modifications were made, the original labels are returned. -func (b *Builder) Labels() Labels { - if len(b.del) == 0 && len(b.add) == 0 { - return b.base - } - - slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) - slices.Sort(b.del) - a, d, newSize := 0, 0, 0 - - newSize, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums) - bufSize := len(b.base.data) + newSize - buf := make([]byte, 0, bufSize) - for pos := 0; pos < len(b.base.data); { - oldPos := pos - var lName string - lName, pos = decodeString(b.base.syms, b.base.data, pos) - _, pos = decodeVarint(b.base.data, pos) - for d < len(b.del) && b.del[d] < lName { - d++ - } - if d < len(b.del) && b.del[d] == lName { - continue // This label has been deleted. - } - for ; a < len(b.add) && b.add[a].Name < lName; a++ { - buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) // Insert label that was not in the base set. - } - if a < len(b.add) && b.add[a].Name == lName { - buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) - a++ - continue // This label has been replaced. - } - buf = append(buf, b.base.data[oldPos:pos]...) // If base had a symbol-table we are using it, so we don't need to look up these symbols. - } - // We have come to the end of the base set; add any remaining labels. 
- for ; a < len(b.add); a++ { - buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) - } - return Labels{syms: b.syms.nameTable, data: yoloString(buf)} -} - -func marshalNumbersToSizedBuffer(nums []int, data []byte) int { - i := len(data) - for index := len(nums) - 1; index >= 0; index-- { - i = encodeVarint(data, i, nums[index]) - } - return len(data) - i -} - -func sizeVarint(x uint64) (n int) { - // Most common case first - if x < 1<<7 { - return 1 - } - if x >= 1<<56 { - return 9 - } - if x >= 1<<28 { - x >>= 28 - n = 4 - } - if x >= 1<<14 { - x >>= 14 - n += 2 - } - if x >= 1<<7 { - n++ - } - return n + 1 -} - -func encodeVarintSlow(data []byte, offset int, v uint64) int { - offset -= sizeVarint(v) - base := offset - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return base -} - -// Special code for the common case that a value is less than 128 -func encodeVarint(data []byte, offset, v int) int { - if v < 1<<7 { - offset-- - data[offset] = uint8(v) - return offset - } - return encodeVarintSlow(data, offset, uint64(v)) -} - -// Map all the strings in lbls to the symbol table; return the total size required to hold them and all the individual mappings. -func mapLabelsToNumbers(t *SymbolTable, lbls []Label, buf []int) (totalSize int, nums []int) { - nums = buf[:0] - t.mx.Lock() - defer t.mx.Unlock() - // we just encode name/value/name/value, without any extra tags or length bytes - for _, m := range lbls { - // strings are encoded as a single varint, the index into the symbol table. - i := t.toNumUnlocked(m.Name) - nums = append(nums, i) - totalSize += sizeVarint(uint64(i)) - i = t.toNumUnlocked(m.Value) - nums = append(nums, i) - totalSize += sizeVarint(uint64(i)) - } - return totalSize, nums -} - -func appendLabelTo(nameNum, valueNum int, buf []byte) []byte { - size := sizeVarint(uint64(nameNum)) + sizeVarint(uint64(valueNum)) - sizeRequired := len(buf) + size - if cap(buf) >= sizeRequired { - buf = buf[:sizeRequired] - } else { - bufSize := cap(buf) - // Double size of buffer each time it needs to grow, to amortise copying cost. - for bufSize < sizeRequired { - bufSize = bufSize*2 + 1 - } - newBuf := make([]byte, sizeRequired, bufSize) - copy(newBuf, buf) - buf = newBuf - } - i := sizeRequired - i = encodeVarint(buf, i, valueNum) - i = encodeVarint(buf, i, nameNum) - return buf -} - -// ScratchBuilder allows efficient construction of a Labels from scratch. -type ScratchBuilder struct { - syms *SymbolTable - nums []int - add []Label - output Labels - overwriteBuffer []byte -} - -// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. -// Warning: expensive; don't call in tight loops. -func NewScratchBuilder(n int) ScratchBuilder { - return ScratchBuilder{syms: NewSymbolTable(), add: make([]Label, 0, n)} -} - -// NewScratchBuilderWithSymbolTable creates a ScratchBuilder initialized for Labels with n entries. -func NewScratchBuilderWithSymbolTable(s *SymbolTable, n int) ScratchBuilder { - return ScratchBuilder{syms: s, add: make([]Label, 0, n)} -} - -func (b *ScratchBuilder) SetSymbolTable(s *SymbolTable) { - b.syms = s -} - -func (b *ScratchBuilder) Reset() { - b.add = b.add[:0] - b.output = EmptyLabels() -} - -// Add a name/value pair. -// Note if you Add the same name twice you will get a duplicate label, which is invalid. 
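
The dedupelabels encoding above packs each label as two varints that index the SymbolTable, written back-to-front into a pre-sized buffer by encodeVarint and marshalNumbersToSizedBuffer. The following standalone sketch shows a simplified, append-based round-trip of the same 7-bit varint scheme; the helper names are illustrative and not part of the vendored code.

package main

import "fmt"

// appendUvarint uses the same 7-bit group encoding as encodeVarint above,
// but appends forward instead of filling a sized buffer from the end.
func appendUvarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80)
		v >>= 7
	}
	return append(buf, byte(v))
}

// readUvarint is a simplified counterpart of decodeVarint above,
// assuming well-formed input.
func readUvarint(data []byte, i int) (uint64, int) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i
		}
	}
}

func main() {
	buf := appendUvarint(nil, 300) // symbol index 300 -> 0xAC 0x02
	v, next := readUvarint(buf, 0)
	fmt.Println(v, next) // 300 2
}
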
-func (b *ScratchBuilder) Add(name, value string) { - b.add = append(b.add, Label{Name: name, Value: value}) -} - -// Add a name/value pair, using []byte instead of string to reduce memory allocations. -// The values must remain live until Labels() is called. -func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { - b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)}) -} - -// Sort the labels added so far by name. -func (b *ScratchBuilder) Sort() { - slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) -} - -// Assign is for when you already have a Labels which you want this ScratchBuilder to return. -func (b *ScratchBuilder) Assign(l Labels) { - b.output = l -} - -// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect. -// Note: if you want them sorted, call Sort() first. -func (b *ScratchBuilder) Labels() Labels { - if b.output.IsEmpty() { - var size int - size, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums) - buf := make([]byte, size) - marshalNumbersToSizedBuffer(b.nums, buf) - b.output = Labels{syms: b.syms.nameTable, data: yoloString(buf)} - } - return b.output -} - -// Write the newly-built Labels out to ls, reusing an internal buffer. -// Callers must ensure that there are no other references to ls, or any strings fetched from it. -func (b *ScratchBuilder) Overwrite(ls *Labels) { - var size int - size, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums) - if size <= cap(b.overwriteBuffer) { - b.overwriteBuffer = b.overwriteBuffer[:size] - } else { - b.overwriteBuffer = make([]byte, size) - } - marshalNumbersToSizedBuffer(b.nums, b.overwriteBuffer) - ls.syms = b.syms.nameTable - ls.data = yoloString(b.overwriteBuffer) -} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go deleted file mode 100644 index 9ef764da..00000000 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build stringlabels - -package labels - -import ( - "reflect" - "slices" - "strings" - "unsafe" - - "github.com/cespare/xxhash/v2" -) - -// Labels is implemented by a single flat string holding name/value pairs. -// Each name and value is preceded by its length in varint encoding. -// Names are in order. -type Labels struct { - data string -} - -func decodeSize(data string, index int) (int, int) { - // Fast-path for common case of a single byte, value 0..127. - b := data[index] - index++ - if b < 0x80 { - return int(b), index - } - size := int(b & 0x7F) - for shift := uint(7); ; shift += 7 { - // Just panic if we go of the end of data, since all Labels strings are constructed internally and - // malformed data indicates a bug, or memory corruption. 
- b := data[index] - index++ - size |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - return size, index -} - -func decodeString(data string, index int) (string, int) { - var size int - size, index = decodeSize(data, index) - return data[index : index+size], index + size -} - -// Bytes returns ls as a byte slice. -// It uses non-printing characters and so should not be used for printing. -func (ls Labels) Bytes(buf []byte) []byte { - if cap(buf) < len(ls.data) { - buf = make([]byte, len(ls.data)) - } else { - buf = buf[:len(ls.data)] - } - copy(buf, ls.data) - return buf -} - -// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted. -func (ls Labels) IsZero() bool { - return len(ls.data) == 0 -} - -// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. -// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. -// TODO: This is only used in printing an error message -func (ls Labels) MatchLabels(on bool, names ...string) Labels { - b := NewBuilder(ls) - if on { - b.Keep(names...) - } else { - b.Del(MetricName) - b.Del(names...) - } - return b.Labels() -} - -// Hash returns a hash value for the label set. -// Note: the result is not guaranteed to be consistent across different runs of Prometheus. -func (ls Labels) Hash() uint64 { - return xxhash.Sum64(yoloBytes(ls.data)) -} - -// HashForLabels returns a hash value for the labels matching the provided names. -// 'names' have to be sorted in ascending order. -func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { - b = b[:0] - j := 0 - for i := 0; i < len(ls.data); { - var name, value string - name, i = decodeString(ls.data, i) - value, i = decodeString(ls.data, i) - for j < len(names) && names[j] < name { - j++ - } - if j == len(names) { - break - } - if name == names[j] { - b = append(b, name...) - b = append(b, seps[0]) - b = append(b, value...) - b = append(b, seps[0]) - } - } - - return xxhash.Sum64(b), b -} - -// HashWithoutLabels returns a hash value for all labels except those matching -// the provided names. -// 'names' have to be sorted in ascending order. -func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { - b = b[:0] - j := 0 - for i := 0; i < len(ls.data); { - var name, value string - name, i = decodeString(ls.data, i) - value, i = decodeString(ls.data, i) - for j < len(names) && names[j] < name { - j++ - } - if name == MetricName || (j < len(names) && name == names[j]) { - continue - } - b = append(b, name...) - b = append(b, seps[0]) - b = append(b, value...) - b = append(b, seps[0]) - } - return xxhash.Sum64(b), b -} - -// BytesWithLabels is just as Bytes(), but only for labels matching names. -// 'names' have to be sorted in ascending order. -func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { - b := buf[:0] - j := 0 - for pos := 0; pos < len(ls.data); { - lName, newPos := decodeString(ls.data, pos) - _, newPos = decodeString(ls.data, newPos) - for j < len(names) && names[j] < lName { - j++ - } - if j == len(names) { - break - } - if lName == names[j] { - b = append(b, ls.data[pos:newPos]...) - } - pos = newPos - } - return b -} - -// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. -// 'names' have to be sorted in ascending order. 
-func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { - b := buf[:0] - j := 0 - for pos := 0; pos < len(ls.data); { - lName, newPos := decodeString(ls.data, pos) - _, newPos = decodeString(ls.data, newPos) - for j < len(names) && names[j] < lName { - j++ - } - if j == len(names) || lName != names[j] { - b = append(b, ls.data[pos:newPos]...) - } - pos = newPos - } - return b -} - -// Copy returns a copy of the labels. -func (ls Labels) Copy() Labels { - return Labels{data: strings.Clone(ls.data)} -} - -// Get returns the value for the label with the given name. -// Returns an empty string if the label doesn't exist. -func (ls Labels) Get(name string) string { - if name == "" { // Avoid crash in loop if someone asks for "". - return "" // Prometheus does not store blank label names. - } - for i := 0; i < len(ls.data); { - var size int - size, i = decodeSize(ls.data, i) - if ls.data[i] == name[0] { - lName := ls.data[i : i+size] - i += size - if lName == name { - lValue, _ := decodeString(ls.data, i) - return lValue - } - } else { - if ls.data[i] > name[0] { // Stop looking if we've gone past. - break - } - i += size - } - size, i = decodeSize(ls.data, i) - i += size - } - return "" -} - -// Has returns true if the label with the given name is present. -func (ls Labels) Has(name string) bool { - if name == "" { // Avoid crash in loop if someone asks for "". - return false // Prometheus does not store blank label names. - } - for i := 0; i < len(ls.data); { - var size int - size, i = decodeSize(ls.data, i) - if ls.data[i] == name[0] { - lName := ls.data[i : i+size] - i += size - if lName == name { - return true - } - } else { - if ls.data[i] > name[0] { // Stop looking if we've gone past. - break - } - i += size - } - size, i = decodeSize(ls.data, i) - i += size - } - return false -} - -// HasDuplicateLabelNames returns whether ls has duplicate label names. -// It assumes that the labelset is sorted. -func (ls Labels) HasDuplicateLabelNames() (string, bool) { - var lName, prevName string - for i := 0; i < len(ls.data); { - lName, i = decodeString(ls.data, i) - _, i = decodeString(ls.data, i) - if lName == prevName { - return lName, true - } - prevName = lName - } - return "", false -} - -// WithoutEmpty returns the labelset without empty labels. -// May return the same labelset. -func (ls Labels) WithoutEmpty() Labels { - for pos := 0; pos < len(ls.data); { - _, newPos := decodeString(ls.data, pos) - lValue, newPos := decodeString(ls.data, newPos) - if lValue != "" { - pos = newPos - continue - } - // Do not copy the slice until it's necessary. - // TODO: could optimise the case where all blanks are at the end. - // Note: we size the new buffer on the assumption there is exactly one blank value. - buf := make([]byte, pos, pos+(len(ls.data)-newPos)) - copy(buf, ls.data[:pos]) // copy the initial non-blank labels - pos = newPos // move past the first blank value - for pos < len(ls.data) { - var newPos int - _, newPos = decodeString(ls.data, pos) - lValue, newPos = decodeString(ls.data, newPos) - if lValue != "" { - buf = append(buf, ls.data[pos:newPos]...) - } - pos = newPos - } - return Labels{data: yoloString(buf)} - } - return ls -} - -// Equal returns whether the two label sets are equal. -func Equal(ls, o Labels) bool { - return ls.data == o.data -} - -// EmptyLabels returns an empty Labels value, for convenience. 
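
Under the stringlabels build tag above, a whole label set is one flat Go string of length-prefixed name/value pairs, which is why Equal can compare the raw strings directly. The standalone sketch below decodes that layout for the common single-byte-length case; the sample bytes are illustrative and not taken from the vendored code, and real lengths can be multi-byte varints handled by decodeSize.

package main

import "fmt"

// decodePair mirrors decodeSize/decodeString above, restricted to
// single-byte lengths for brevity.
func decodePair(data string, i int) (name, value string, next int) {
	n := int(data[i])
	name = data[i+1 : i+1+n]
	i += 1 + n
	n = int(data[i])
	value = data[i+1 : i+1+n]
	return name, value, i + 1 + n
}

func main() {
	data := "\x01a\x01b\x03job\x03api" // encodes {a="b", job="api"}
	for i := 0; i < len(data); {
		var n, v string
		n, v, i = decodePair(data, i)
		fmt.Printf("%s=%q\n", n, v)
	}
}
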
-func EmptyLabels() Labels { - return Labels{} -} - -func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) -} - -func yoloBytes(s string) (b []byte) { - *(*string)(unsafe.Pointer(&b)) = s - (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) - return -} - -// New returns a sorted Labels from the given labels. -// The caller has to guarantee that all label names are unique. -func New(ls ...Label) Labels { - slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) - size := labelsSize(ls) - buf := make([]byte, size) - marshalLabelsToSizedBuffer(ls, buf) - return Labels{data: yoloString(buf)} -} - -// FromStrings creates new labels from pairs of strings. -func FromStrings(ss ...string) Labels { - if len(ss)%2 != 0 { - panic("invalid number of strings") - } - ls := make([]Label, 0, len(ss)/2) - for i := 0; i < len(ss); i += 2 { - ls = append(ls, Label{Name: ss[i], Value: ss[i+1]}) - } - - return New(ls...) -} - -// Compare compares the two label sets. -// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. -func Compare(a, b Labels) int { - // Find the first byte in the string where a and b differ. - shorter, longer := a.data, b.data - if len(b.data) < len(a.data) { - shorter, longer = b.data, a.data - } - i := 0 - // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. - sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) - lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) - for ; i < len(shorter)-8; i += 8 { - if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { - break - } - } - // Now go 1 byte at a time. - for ; i < len(shorter); i++ { - if shorter[i] != longer[i] { - break - } - } - if i == len(shorter) { - // One Labels was a prefix of the other; the set with fewer labels compares lower. - return len(a.data) - len(b.data) - } - - // Now we know that there is some difference before the end of a and b. - // Go back through the fields and find which field that difference is in. - firstCharDifferent, i := i, 0 - size, nextI := decodeSize(a.data, i) - for nextI+size <= firstCharDifferent { - i = nextI + size - size, nextI = decodeSize(a.data, i) - } - // Difference is inside this entry. - aStr, _ := decodeString(a.data, i) - bStr, _ := decodeString(b.data, i) - if aStr < bStr { - return -1 - } - return +1 -} - -// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. -func (ls *Labels) CopyFrom(b Labels) { - ls.data = b.data // strings are immutable -} - -// IsEmpty returns true if ls represents an empty set of labels. -func (ls Labels) IsEmpty() bool { - return len(ls.data) == 0 -} - -// Len returns the number of labels; it is relatively slow. -func (ls Labels) Len() int { - count := 0 - for i := 0; i < len(ls.data); { - var size int - size, i = decodeSize(ls.data, i) - i += size - size, i = decodeSize(ls.data, i) - i += size - count++ - } - return count -} - -// Range calls f on each label. -func (ls Labels) Range(f func(l Label)) { - for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.data, i) - lValue, i = decodeString(ls.data, i) - f(Label{Name: lName, Value: lValue}) - } -} - -// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. 
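
Compare above first scans the two encoded strings eight bytes at a time and only decodes the entry where they diverge; the observable ordering is by name, then value, and a set that is a prefix of the other sorts first. A small sketch against the public API, with made-up label values:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	a := labels.FromStrings("env", "prod", "job", "api")
	b := labels.FromStrings("env", "prod", "job", "web")
	c := labels.FromStrings("env", "prod")
	fmt.Println(labels.Compare(a, b) < 0) // true: "api" < "web"
	fmt.Println(labels.Compare(c, a) < 0) // true: c is a prefix of a
}
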
-func (ls Labels) Validate(f func(l Label) error) error { - for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.data, i) - lValue, i = decodeString(ls.data, i) - err := f(Label{Name: lName, Value: lValue}) - if err != nil { - return err - } - } - return nil -} - -// DropMetricName returns Labels with "__name__" removed. -func (ls Labels) DropMetricName() Labels { - for i := 0; i < len(ls.data); { - lName, i2 := decodeString(ls.data, i) - size, i2 := decodeSize(ls.data, i2) - i2 += size - if lName == MetricName { - if i == 0 { // Make common case fast with no allocations. - ls.data = ls.data[i2:] - } else { - ls.data = ls.data[:i] + ls.data[i2:] - } - break - } else if lName[0] > MetricName[0] { // Stop looking if we've gone past. - break - } - i = i2 - } - return ls -} - -// InternStrings is a no-op because it would only save when the whole set of labels is identical. -func (ls *Labels) InternStrings(intern func(string) string) { -} - -// ReleaseStrings is a no-op for the same reason as InternStrings. -func (ls Labels) ReleaseStrings(release func(string)) { -} - -// Builder allows modifying Labels. -type Builder struct { - base Labels - del []string - add []Label -} - -// Reset clears all current state for the builder. -func (b *Builder) Reset(base Labels) { - b.base = base - b.del = b.del[:0] - b.add = b.add[:0] - b.base.Range(func(l Label) { - if l.Value == "" { - b.del = append(b.del, l.Name) - } - }) -} - -// Labels returns the labels from the builder. -// If no modifications were made, the original labels are returned. -func (b *Builder) Labels() Labels { - if len(b.del) == 0 && len(b.add) == 0 { - return b.base - } - - slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) - slices.Sort(b.del) - a, d := 0, 0 - - bufSize := len(b.base.data) + labelsSize(b.add) - buf := make([]byte, 0, bufSize) - for pos := 0; pos < len(b.base.data); { - oldPos := pos - var lName string - lName, pos = decodeString(b.base.data, pos) - _, pos = decodeString(b.base.data, pos) - for d < len(b.del) && b.del[d] < lName { - d++ - } - if d < len(b.del) && b.del[d] == lName { - continue // This label has been deleted. - } - for ; a < len(b.add) && b.add[a].Name < lName; a++ { - buf = appendLabelTo(buf, &b.add[a]) // Insert label that was not in the base set. - } - if a < len(b.add) && b.add[a].Name == lName { - buf = appendLabelTo(buf, &b.add[a]) - a++ - continue // This label has been replaced. - } - buf = append(buf, b.base.data[oldPos:pos]...) - } - // We have come to the end of the base set; add any remaining labels. 
- for ; a < len(b.add); a++ { - buf = appendLabelTo(buf, &b.add[a]) - } - return Labels{data: yoloString(buf)} -} - -func marshalLabelsToSizedBuffer(lbls []Label, data []byte) int { - i := len(data) - for index := len(lbls) - 1; index >= 0; index-- { - size := marshalLabelToSizedBuffer(&lbls[index], data[:i]) - i -= size - } - return len(data) - i -} - -func marshalLabelToSizedBuffer(m *Label, data []byte) int { - i := len(data) - i -= len(m.Value) - copy(data[i:], m.Value) - i = encodeSize(data, i, len(m.Value)) - i -= len(m.Name) - copy(data[i:], m.Name) - i = encodeSize(data, i, len(m.Name)) - return len(data) - i -} - -func sizeVarint(x uint64) (n int) { - // Most common case first - if x < 1<<7 { - return 1 - } - if x >= 1<<56 { - return 9 - } - if x >= 1<<28 { - x >>= 28 - n = 4 - } - if x >= 1<<14 { - x >>= 14 - n += 2 - } - if x >= 1<<7 { - n++ - } - return n + 1 -} - -func encodeVarint(data []byte, offset int, v uint64) int { - offset -= sizeVarint(v) - base := offset - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return base -} - -// Special code for the common case that a size is less than 128 -func encodeSize(data []byte, offset, v int) int { - if v < 1<<7 { - offset-- - data[offset] = uint8(v) - return offset - } - return encodeVarint(data, offset, uint64(v)) -} - -func labelsSize(lbls []Label) (n int) { - // we just encode name/value/name/value, without any extra tags or length bytes - for _, e := range lbls { - n += labelSize(&e) - } - return n -} - -func labelSize(m *Label) (n int) { - // strings are encoded as length followed by contents. - l := len(m.Name) - n += l + sizeVarint(uint64(l)) - l = len(m.Value) - n += l + sizeVarint(uint64(l)) - return n -} - -func appendLabelTo(buf []byte, m *Label) []byte { - size := labelSize(m) - sizeRequired := len(buf) + size - if cap(buf) >= sizeRequired { - buf = buf[:sizeRequired] - } else { - bufSize := cap(buf) - // Double size of buffer each time it needs to grow, to amortise copying cost. - for bufSize < sizeRequired { - bufSize = bufSize*2 + 1 - } - newBuf := make([]byte, sizeRequired, bufSize) - copy(newBuf, buf) - buf = newBuf - } - marshalLabelToSizedBuffer(m, buf) - return buf -} - -// ScratchBuilder allows efficient construction of a Labels from scratch. -type ScratchBuilder struct { - add []Label - output Labels - overwriteBuffer []byte -} - -// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. -func NewScratchBuilder(n int) ScratchBuilder { - return ScratchBuilder{add: make([]Label, 0, n)} -} - -func (b *ScratchBuilder) Reset() { - b.add = b.add[:0] - b.output = EmptyLabels() -} - -// Add a name/value pair. -// Note if you Add the same name twice you will get a duplicate label, which is invalid. -func (b *ScratchBuilder) Add(name, value string) { - b.add = append(b.add, Label{Name: name, Value: value}) -} - -// Add a name/value pair, using []byte instead of string to reduce memory allocations. -// The values must remain live until Labels() is called. -func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { - b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)}) -} - -// Sort the labels added so far by name. -func (b *ScratchBuilder) Sort() { - slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) -} - -// Assign is for when you already have a Labels which you want this ScratchBuilder to return. 
-func (b *ScratchBuilder) Assign(l Labels) { - b.output = l -} - -// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect. -// Note: if you want them sorted, call Sort() first. -func (b *ScratchBuilder) Labels() Labels { - if b.output.IsEmpty() { - size := labelsSize(b.add) - buf := make([]byte, size) - marshalLabelsToSizedBuffer(b.add, buf) - b.output = Labels{data: yoloString(buf)} - } - return b.output -} - -// Write the newly-built Labels out to ls, reusing an internal buffer. -// Callers must ensure that there are no other references to ls, or any strings fetched from it. -func (b *ScratchBuilder) Overwrite(ls *Labels) { - size := labelsSize(b.add) - if size <= cap(b.overwriteBuffer) { - b.overwriteBuffer = b.overwriteBuffer[:size] - } else { - b.overwriteBuffer = make([]byte, size) - } - marshalLabelsToSizedBuffer(b.add, b.overwriteBuffer) - ls.data = yoloString(b.overwriteBuffer) -} - -// Symbol-table is no-op, just for api parity with dedupelabels. -type SymbolTable struct{} - -func NewSymbolTable() *SymbolTable { return nil } - -func (t *SymbolTable) Len() int { return 0 } - -// NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels. -func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder { - return NewBuilder(EmptyLabels()) -} - -// NewScratchBuilderWithSymbolTable creates a ScratchBuilder, for api parity with dedupelabels. -func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { - return NewScratchBuilder(n) -} - -func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { - // no-op -} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/matcher.go b/vendor/github.com/prometheus/prometheus/model/labels/matcher.go deleted file mode 100644 index 8e220e39..00000000 --- a/vendor/github.com/prometheus/prometheus/model/labels/matcher.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package labels - -import ( - "bytes" - "strconv" -) - -// MatchType is an enum for label matching types. -type MatchType int - -// Possible MatchTypes. -const ( - MatchEqual MatchType = iota - MatchNotEqual - MatchRegexp - MatchNotRegexp -) - -var matchTypeToStr = [...]string{ - MatchEqual: "=", - MatchNotEqual: "!=", - MatchRegexp: "=~", - MatchNotRegexp: "!~", -} - -func (m MatchType) String() string { - if m < MatchEqual || m > MatchNotRegexp { - panic("unknown match type") - } - return matchTypeToStr[m] -} - -// Matcher models the matching of a label. -type Matcher struct { - Type MatchType - Name string - Value string - - re *FastRegexMatcher -} - -// NewMatcher returns a matcher object. 
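
matcher.go above defines the four match types and a Matcher that compiles regex values through FastRegexMatcher. A minimal sketch of how it is typically constructed and queried via the package's public API follows; the label name and pattern are illustrative only.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	m, err := labels.NewMatcher(labels.MatchRegexp, "job", "api|web")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.String())        // job=~"api|web"
	fmt.Println(m.Matches("api"))  // true
	fmt.Println(m.Matches("cron")) // false
}
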
-func NewMatcher(t MatchType, n, v string) (*Matcher, error) { - m := &Matcher{ - Type: t, - Name: n, - Value: v, - } - if t == MatchRegexp || t == MatchNotRegexp { - re, err := NewFastRegexMatcher(v) - if err != nil { - return nil, err - } - m.re = re - } - return m, nil -} - -// MustNewMatcher panics on error - only for use in tests! -func MustNewMatcher(mt MatchType, name, val string) *Matcher { - m, err := NewMatcher(mt, name, val) - if err != nil { - panic(err) - } - return m -} - -func (m *Matcher) String() string { - // Start a buffer with a pre-allocated size on stack to cover most needs. - var bytea [1024]byte - b := bytes.NewBuffer(bytea[:0]) - - if m.shouldQuoteName() { - b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Name)) - } else { - b.WriteString(m.Name) - } - b.WriteString(m.Type.String()) - b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Value)) - - return b.String() -} - -func (m *Matcher) shouldQuoteName() bool { - for i, c := range m.Name { - if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') { - continue - } - return true - } - return false -} - -// Matches returns whether the matcher matches the given string value. -func (m *Matcher) Matches(s string) bool { - switch m.Type { - case MatchEqual: - return s == m.Value - case MatchNotEqual: - return s != m.Value - case MatchRegexp: - return m.re.MatchString(s) - case MatchNotRegexp: - return !m.re.MatchString(s) - } - panic("labels.Matcher.Matches: invalid match type") -} - -// Inverse returns a matcher that matches the opposite. -func (m *Matcher) Inverse() (*Matcher, error) { - switch m.Type { - case MatchEqual: - return NewMatcher(MatchNotEqual, m.Name, m.Value) - case MatchNotEqual: - return NewMatcher(MatchEqual, m.Name, m.Value) - case MatchRegexp: - return NewMatcher(MatchNotRegexp, m.Name, m.Value) - case MatchNotRegexp: - return NewMatcher(MatchRegexp, m.Name, m.Value) - } - panic("labels.Matcher.Matches: invalid match type") -} - -// GetRegexString returns the regex string. -func (m *Matcher) GetRegexString() string { - if m.re == nil { - return "" - } - return m.re.GetRegexString() -} - -// SetMatches returns a set of equality matchers for the current regex matchers if possible. -// For examples the regexp `a(b|f)` will returns "ab" and "af". -// Returns nil if we can't replace the regexp by only equality matchers. -func (m *Matcher) SetMatches() []string { - if m.re == nil { - return nil - } - return m.re.SetMatches() -} - -// Prefix returns the required prefix of the value to match, if possible. -// It will be empty if it's an equality matcher or if the prefix can't be determined. -func (m *Matcher) Prefix() string { - if m.re == nil { - return "" - } - return m.re.prefix -} - -// IsRegexOptimized returns whether regex is optimized. -func (m *Matcher) IsRegexOptimized() bool { - if m.re == nil { - return false - } - return m.re.IsOptimized() -} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go deleted file mode 100644 index b484e271..00000000 --- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go +++ /dev/null @@ -1,942 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package labels - -import ( - "slices" - "strings" - "unicode/utf8" - - "github.com/grafana/regexp" - "github.com/grafana/regexp/syntax" -) - -const ( - maxSetMatches = 256 - - // The minimum number of alternate values a regex should have to trigger - // the optimization done by optimizeEqualStringMatchers() and so use a map - // to match values instead of iterating over a list. This value has - // been computed running BenchmarkOptimizeEqualStringMatchers. - minEqualMultiStringMatcherMapThreshold = 16 -) - -type FastRegexMatcher struct { - // Under some conditions, re is nil because the expression is never parsed. - // We store the original string to be able to return it in GetRegexString(). - reString string - re *regexp.Regexp - - setMatches []string - stringMatcher StringMatcher - prefix string - suffix string - contains string - - // matchString is the "compiled" function to run by MatchString(). - matchString func(string) bool -} - -func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { - m := &FastRegexMatcher{ - reString: v, - } - - m.stringMatcher, m.setMatches = optimizeAlternatingLiterals(v) - if m.stringMatcher != nil { - // If we already have a string matcher, we don't need to parse the regex - // or compile the matchString function. This also avoids the behavior in - // compileMatchStringFunction where it prefers to use setMatches when - // available, even if the string matcher is faster. - m.matchString = m.stringMatcher.Matches - } else { - parsed, err := syntax.Parse(v, syntax.Perl) - if err != nil { - return nil, err - } - // Simplify the syntax tree to run faster. - parsed = parsed.Simplify() - m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$") - if err != nil { - return nil, err - } - if parsed.Op == syntax.OpConcat { - m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed) - } - if matches, caseSensitive := findSetMatches(parsed); caseSensitive { - m.setMatches = matches - } - m.stringMatcher = stringMatcherFromRegexp(parsed) - m.matchString = m.compileMatchStringFunction() - } - - return m, nil -} - -// compileMatchStringFunction returns the function to run by MatchString(). -func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool { - // If the only optimization available is the string matcher, then we can just run it. - if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && m.contains == "" && m.stringMatcher != nil { - return m.stringMatcher.Matches - } - - return func(s string) bool { - if len(m.setMatches) != 0 { - for _, match := range m.setMatches { - if match == s { - return true - } - } - return false - } - if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { - return false - } - if m.suffix != "" && !strings.HasSuffix(s, m.suffix) { - return false - } - if m.contains != "" && !strings.Contains(s, m.contains) { - return false - } - if m.stringMatcher != nil { - return m.stringMatcher.Matches(s) - } - return m.re.MatchString(s) - } -} - -// IsOptimized returns true if any fast-path optimization is applied to the -// regex matcher. 
-func (m *FastRegexMatcher) IsOptimized() bool { - return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || m.contains != "" -} - -// findSetMatches extract equality matches from a regexp. -// Returns nil if we can't replace the regexp by only equality matchers or the regexp contains -// a mix of case sensitive and case insensitive matchers. -func findSetMatches(re *syntax.Regexp) (matches []string, caseSensitive bool) { - clearBeginEndText(re) - - return findSetMatchesInternal(re, "") -} - -func findSetMatchesInternal(re *syntax.Regexp, base string) (matches []string, caseSensitive bool) { - switch re.Op { - case syntax.OpBeginText: - // Correctly handling the begin text operator inside a regex is tricky, - // so in this case we fallback to the regex engine. - return nil, false - case syntax.OpEndText: - // Correctly handling the end text operator inside a regex is tricky, - // so in this case we fallback to the regex engine. - return nil, false - case syntax.OpLiteral: - return []string{base + string(re.Rune)}, isCaseSensitive(re) - case syntax.OpEmptyMatch: - if base != "" { - return []string{base}, isCaseSensitive(re) - } - case syntax.OpAlternate: - return findSetMatchesFromAlternate(re, base) - case syntax.OpCapture: - clearCapture(re) - return findSetMatchesInternal(re, base) - case syntax.OpConcat: - return findSetMatchesFromConcat(re, base) - case syntax.OpCharClass: - if len(re.Rune)%2 != 0 { - return nil, false - } - var matches []string - var totalSet int - for i := 0; i+1 < len(re.Rune); i += 2 { - totalSet += int(re.Rune[i+1]-re.Rune[i]) + 1 - } - // limits the total characters that can be used to create matches. - // In some case like negation [^0-9] a lot of possibilities exists and that - // can create thousands of possible matches at which points we're better off using regexp. - if totalSet > maxSetMatches { - return nil, false - } - for i := 0; i+1 < len(re.Rune); i += 2 { - lo, hi := re.Rune[i], re.Rune[i+1] - for c := lo; c <= hi; c++ { - matches = append(matches, base+string(c)) - } - } - return matches, isCaseSensitive(re) - default: - return nil, false - } - return nil, false -} - -func findSetMatchesFromConcat(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) { - if len(re.Sub) == 0 { - return nil, false - } - clearCapture(re.Sub...) - - matches = []string{base} - - for i := 0; i < len(re.Sub); i++ { - var newMatches []string - for j, b := range matches { - m, caseSensitive := findSetMatchesInternal(re.Sub[i], b) - if m == nil { - return nil, false - } - if tooManyMatches(newMatches, m...) { - return nil, false - } - - // All matches must have the same case sensitivity. If it's the first set of matches - // returned, we store its sensitivity as the expected case, and then we'll check all - // other ones. - if i == 0 && j == 0 { - matchesCaseSensitive = caseSensitive - } - if matchesCaseSensitive != caseSensitive { - return nil, false - } - - newMatches = append(newMatches, m...) - } - matches = newMatches - } - - return matches, matchesCaseSensitive -} - -func findSetMatchesFromAlternate(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) { - for i, sub := range re.Sub { - found, caseSensitive := findSetMatchesInternal(sub, base) - if found == nil { - return nil, false - } - if tooManyMatches(matches, found...) { - return nil, false - } - - // All matches must have the same case sensitivity. 
If it's the first set of matches - // returned, we store its sensitivity as the expected case, and then we'll check all - // other ones. - if i == 0 { - matchesCaseSensitive = caseSensitive - } - if matchesCaseSensitive != caseSensitive { - return nil, false - } - - matches = append(matches, found...) - } - - return matches, matchesCaseSensitive -} - -// clearCapture removes capture operation as they are not used for matching. -func clearCapture(regs ...*syntax.Regexp) { - for _, r := range regs { - // Iterate on the regexp because capture groups could be nested. - for r.Op == syntax.OpCapture { - *r = *r.Sub[0] - } - } -} - -// clearBeginEndText removes the begin and end text from the regexp. Prometheus regexp are anchored to the beginning and end of the string. -func clearBeginEndText(re *syntax.Regexp) { - // Do not clear begin/end text from an alternate operator because it could - // change the actual regexp properties. - if re.Op == syntax.OpAlternate { - return - } - - if len(re.Sub) == 0 { - return - } - if len(re.Sub) == 1 { - if re.Sub[0].Op == syntax.OpBeginText || re.Sub[0].Op == syntax.OpEndText { - // We need to remove this element. Since it's the only one, we convert into a matcher of an empty string. - // OpEmptyMatch is regexp's nop operator. - re.Op = syntax.OpEmptyMatch - re.Sub = nil - return - } - } - if re.Sub[0].Op == syntax.OpBeginText { - re.Sub = re.Sub[1:] - } - if re.Sub[len(re.Sub)-1].Op == syntax.OpEndText { - re.Sub = re.Sub[:len(re.Sub)-1] - } -} - -// isCaseInsensitive tells if a regexp is case insensitive. -// The flag should be check at each level of the syntax tree. -func isCaseInsensitive(reg *syntax.Regexp) bool { - return (reg.Flags & syntax.FoldCase) != 0 -} - -// isCaseSensitive tells if a regexp is case sensitive. -// The flag should be check at each level of the syntax tree. -func isCaseSensitive(reg *syntax.Regexp) bool { - return !isCaseInsensitive(reg) -} - -// tooManyMatches guards against creating too many set matches. -func tooManyMatches(matches []string, added ...string) bool { - return len(matches)+len(added) > maxSetMatches -} - -func (m *FastRegexMatcher) MatchString(s string) bool { - return m.matchString(s) -} - -func (m *FastRegexMatcher) SetMatches() []string { - // IMPORTANT: always return a copy, otherwise if the caller manipulate this slice it will - // also get manipulated in the cached FastRegexMatcher instance. - return slices.Clone(m.setMatches) -} - -func (m *FastRegexMatcher) GetRegexString() string { - return m.reString -} - -// optimizeAlternatingLiterals optimizes a regex of the form -// -// `literal1|literal2|literal3|...` -// -// this function returns an optimized StringMatcher or nil if the regex -// cannot be optimized in this way, and a list of setMatches up to maxSetMatches. 
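
The alternating-literals fast path described above turns a pure `literal1|literal2|...` pattern into a set matcher without compiling the regexp at all, and its effect is visible through the exported FastRegexMatcher API. A usage sketch, with an illustrative pattern:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	m, err := labels.NewFastRegexMatcher("api|web|worker")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.SetMatches())        // [api web worker]
	fmt.Println(m.MatchString("web"))  // true
	fmt.Println(m.MatchString("cron")) // false
	fmt.Println(m.IsOptimized())       // true
}
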
-func optimizeAlternatingLiterals(s string) (StringMatcher, []string) { - if len(s) == 0 { - return emptyStringMatcher{}, nil - } - - estimatedAlternates := strings.Count(s, "|") + 1 - - // If there are no alternates, check if the string is a literal - if estimatedAlternates == 1 { - if regexp.QuoteMeta(s) == s { - return &equalStringMatcher{s: s, caseSensitive: true}, []string{s} - } - return nil, nil - } - - multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates) - - for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') { - // Split the string into the next literal and the remainder - subMatch := s[:end] - s = s[end+1:] - - // break if any of the submatches are not literals - if regexp.QuoteMeta(subMatch) != subMatch { - return nil, nil - } - - multiMatcher.add(subMatch) - } - - // break if the remainder is not a literal - if regexp.QuoteMeta(s) != s { - return nil, nil - } - multiMatcher.add(s) - - return multiMatcher, multiMatcher.setMatches() -} - -// optimizeConcatRegex returns literal prefix/suffix text that can be safely -// checked against the label value before running the regexp matcher. -func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) { - sub := r.Sub - - // We can safely remove begin and end text matchers respectively - // at the beginning and end of the regexp. - if len(sub) > 0 && sub[0].Op == syntax.OpBeginText { - sub = sub[1:] - } - if len(sub) > 0 && sub[len(sub)-1].Op == syntax.OpEndText { - sub = sub[:len(sub)-1] - } - - if len(sub) == 0 { - return - } - - // Given Prometheus regex matchers are always anchored to the begin/end - // of the text, if the first/last operations are literals, we can safely - // treat them as prefix/suffix. - if sub[0].Op == syntax.OpLiteral && (sub[0].Flags&syntax.FoldCase) == 0 { - prefix = string(sub[0].Rune) - } - if last := len(sub) - 1; sub[last].Op == syntax.OpLiteral && (sub[last].Flags&syntax.FoldCase) == 0 { - suffix = string(sub[last].Rune) - } - - // If contains any literal which is not a prefix/suffix, we keep the - // 1st one. We do not keep the whole list of literals to simplify the - // fast path. - for i := 1; i < len(sub)-1; i++ { - if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 { - contains = string(sub[i].Rune) - break - } - } - - return -} - -// StringMatcher is a matcher that matches a string in place of a regular expression. -type StringMatcher interface { - Matches(s string) bool -} - -// stringMatcherFromRegexp attempts to replace a common regexp with a string matcher. -// It returns nil if the regexp is not supported. -func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher { - clearBeginEndText(re) - - m := stringMatcherFromRegexpInternal(re) - m = optimizeEqualStringMatchers(m, minEqualMultiStringMatcherMapThreshold) - - return m -} - -func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher { - clearCapture(re) - - switch re.Op { - case syntax.OpBeginText: - // Correctly handling the begin text operator inside a regex is tricky, - // so in this case we fallback to the regex engine. - return nil - case syntax.OpEndText: - // Correctly handling the end text operator inside a regex is tricky, - // so in this case we fallback to the regex engine. 
- return nil - case syntax.OpPlus: - if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL { - return nil - } - return &anyNonEmptyStringMatcher{ - matchNL: re.Sub[0].Op == syntax.OpAnyChar, - } - case syntax.OpStar: - if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL { - return nil - } - - // If the newline is valid, than this matcher literally match any string (even empty). - if re.Sub[0].Op == syntax.OpAnyChar { - return trueMatcher{} - } - - // Any string is fine (including an empty one), as far as it doesn't contain any newline. - return anyStringWithoutNewlineMatcher{} - case syntax.OpQuest: - // Only optimize for ".?". - if len(re.Sub) != 1 || (re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL) { - return nil - } - - return &zeroOrOneCharacterStringMatcher{ - matchNL: re.Sub[0].Op == syntax.OpAnyChar, - } - case syntax.OpEmptyMatch: - return emptyStringMatcher{} - - case syntax.OpLiteral: - return &equalStringMatcher{ - s: string(re.Rune), - caseSensitive: !isCaseInsensitive(re), - } - case syntax.OpAlternate: - or := make([]StringMatcher, 0, len(re.Sub)) - for _, sub := range re.Sub { - m := stringMatcherFromRegexpInternal(sub) - if m == nil { - return nil - } - or = append(or, m) - } - return orStringMatcher(or) - case syntax.OpConcat: - clearCapture(re.Sub...) - - if len(re.Sub) == 0 { - return emptyStringMatcher{} - } - if len(re.Sub) == 1 { - return stringMatcherFromRegexpInternal(re.Sub[0]) - } - - var left, right StringMatcher - - // Let's try to find if there's a first and last any matchers. - if re.Sub[0].Op == syntax.OpPlus || re.Sub[0].Op == syntax.OpStar || re.Sub[0].Op == syntax.OpQuest { - left = stringMatcherFromRegexpInternal(re.Sub[0]) - if left == nil { - return nil - } - re.Sub = re.Sub[1:] - } - if re.Sub[len(re.Sub)-1].Op == syntax.OpPlus || re.Sub[len(re.Sub)-1].Op == syntax.OpStar || re.Sub[len(re.Sub)-1].Op == syntax.OpQuest { - right = stringMatcherFromRegexpInternal(re.Sub[len(re.Sub)-1]) - if right == nil { - return nil - } - re.Sub = re.Sub[:len(re.Sub)-1] - } - - matches, matchesCaseSensitive := findSetMatchesInternal(re, "") - - if len(matches) == 0 && len(re.Sub) == 2 { - // We have not find fixed set matches. We look for other known cases that - // we can optimize. - switch { - // Prefix is literal. - case right == nil && re.Sub[0].Op == syntax.OpLiteral: - right = stringMatcherFromRegexpInternal(re.Sub[1]) - if right != nil { - matches = []string{string(re.Sub[0].Rune)} - matchesCaseSensitive = !isCaseInsensitive(re.Sub[0]) - } - - // Suffix is literal. - case left == nil && re.Sub[1].Op == syntax.OpLiteral: - left = stringMatcherFromRegexpInternal(re.Sub[0]) - if left != nil { - matches = []string{string(re.Sub[1].Rune)} - matchesCaseSensitive = !isCaseInsensitive(re.Sub[1]) - } - } - } - - // Ensure we've found some literals to match (optionally with a left and/or right matcher). - // If not, then this optimization doesn't trigger. - if len(matches) == 0 { - return nil - } - - // Use the right (and best) matcher based on what we've found. - switch { - // No left and right matchers (only fixed set matches). - case left == nil && right == nil: - // if there's no any matchers on both side it's a concat of literals - or := make([]StringMatcher, 0, len(matches)) - for _, match := range matches { - or = append(or, &equalStringMatcher{ - s: match, - caseSensitive: matchesCaseSensitive, - }) - } - return orStringMatcher(or) - - // Right matcher with 1 fixed set match. 
- case left == nil && len(matches) == 1: - return &literalPrefixStringMatcher{ - prefix: matches[0], - prefixCaseSensitive: matchesCaseSensitive, - right: right, - } - - // Left matcher with 1 fixed set match. - case right == nil && len(matches) == 1: - return &literalSuffixStringMatcher{ - left: left, - suffix: matches[0], - suffixCaseSensitive: matchesCaseSensitive, - } - - // We found literals in the middle. We can trigger the fast path only if - // the matches are case sensitive because containsStringMatcher doesn't - // support case insensitive. - case matchesCaseSensitive: - return &containsStringMatcher{ - substrings: matches, - left: left, - right: right, - } - } - } - return nil -} - -// containsStringMatcher matches a string if it contains any of the substrings. -// If left and right are not nil, it's a contains operation where left and right must match. -// If left is nil, it's a hasPrefix operation and right must match. -// Finally, if right is nil it's a hasSuffix operation and left must match. -type containsStringMatcher struct { - // The matcher that must match the left side. Can be nil. - left StringMatcher - - // At least one of these strings must match in the "middle", between left and right matchers. - substrings []string - - // The matcher that must match the right side. Can be nil. - right StringMatcher -} - -func (m *containsStringMatcher) Matches(s string) bool { - for _, substr := range m.substrings { - switch { - case m.right != nil && m.left != nil: - searchStartPos := 0 - - for { - pos := strings.Index(s[searchStartPos:], substr) - if pos < 0 { - break - } - - // Since we started searching from searchStartPos, we have to add that offset - // to get the actual position of the substring inside the text. - pos += searchStartPos - - // If both the left and right matchers match, then we can stop searching because - // we've found a match. - if m.left.Matches(s[:pos]) && m.right.Matches(s[pos+len(substr):]) { - return true - } - - // Continue searching for another occurrence of the substring inside the text. - searchStartPos = pos + 1 - } - case m.left != nil: - // If we have to check for characters on the left then we need to match a suffix. - if strings.HasSuffix(s, substr) && m.left.Matches(s[:len(s)-len(substr)]) { - return true - } - case m.right != nil: - if strings.HasPrefix(s, substr) && m.right.Matches(s[len(substr):]) { - return true - } - } - } - return false -} - -// literalPrefixStringMatcher matches a string with the given literal prefix and right side matcher. -type literalPrefixStringMatcher struct { - prefix string - prefixCaseSensitive bool - - // The matcher that must match the right side. Can be nil. - right StringMatcher -} - -func (m *literalPrefixStringMatcher) Matches(s string) bool { - // Ensure the prefix matches. - if m.prefixCaseSensitive && !strings.HasPrefix(s, m.prefix) { - return false - } - if !m.prefixCaseSensitive && !hasPrefixCaseInsensitive(s, m.prefix) { - return false - } - - // Ensure the right side matches. - return m.right.Matches(s[len(m.prefix):]) -} - -// literalSuffixStringMatcher matches a string with the given literal suffix and left side matcher. -type literalSuffixStringMatcher struct { - // The matcher that must match the left side. Can be nil. - left StringMatcher - - suffix string - suffixCaseSensitive bool -} - -func (m *literalSuffixStringMatcher) Matches(s string) bool { - // Ensure the suffix matches. 
- if m.suffixCaseSensitive && !strings.HasSuffix(s, m.suffix) { - return false - } - if !m.suffixCaseSensitive && !hasSuffixCaseInsensitive(s, m.suffix) { - return false - } - - // Ensure the left side matches. - return m.left.Matches(s[:len(s)-len(m.suffix)]) -} - -// emptyStringMatcher matches an empty string. -type emptyStringMatcher struct{} - -func (m emptyStringMatcher) Matches(s string) bool { - return len(s) == 0 -} - -// orStringMatcher matches any of the sub-matchers. -type orStringMatcher []StringMatcher - -func (m orStringMatcher) Matches(s string) bool { - for _, matcher := range m { - if matcher.Matches(s) { - return true - } - } - return false -} - -// equalStringMatcher matches a string exactly and support case insensitive. -type equalStringMatcher struct { - s string - caseSensitive bool -} - -func (m *equalStringMatcher) Matches(s string) bool { - if m.caseSensitive { - return m.s == s - } - return strings.EqualFold(m.s, s) -} - -type multiStringMatcherBuilder interface { - StringMatcher - add(s string) - setMatches() []string -} - -func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize int) multiStringMatcherBuilder { - // If the estimated size is low enough, it's faster to use a slice instead of a map. - if estimatedSize < minEqualMultiStringMatcherMapThreshold { - return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)} - } - - return &equalMultiStringMapMatcher{ - values: make(map[string]struct{}, estimatedSize), - caseSensitive: caseSensitive, - } -} - -// equalMultiStringSliceMatcher matches a string exactly against a slice of valid values. -type equalMultiStringSliceMatcher struct { - values []string - - caseSensitive bool -} - -func (m *equalMultiStringSliceMatcher) add(s string) { - m.values = append(m.values, s) -} - -func (m *equalMultiStringSliceMatcher) setMatches() []string { - return m.values -} - -func (m *equalMultiStringSliceMatcher) Matches(s string) bool { - if m.caseSensitive { - for _, v := range m.values { - if s == v { - return true - } - } - } else { - for _, v := range m.values { - if strings.EqualFold(s, v) { - return true - } - } - } - return false -} - -// equalMultiStringMapMatcher matches a string exactly against a map of valid values. -type equalMultiStringMapMatcher struct { - // values contains values to match a string against. If the matching is case insensitive, - // the values here must be lowercase. - values map[string]struct{} - - caseSensitive bool -} - -func (m *equalMultiStringMapMatcher) add(s string) { - if !m.caseSensitive { - s = strings.ToLower(s) - } - - m.values[s] = struct{}{} -} - -func (m *equalMultiStringMapMatcher) setMatches() []string { - if len(m.values) >= maxSetMatches { - return nil - } - - matches := make([]string, 0, len(m.values)) - for s := range m.values { - matches = append(matches, s) - } - return matches -} - -func (m *equalMultiStringMapMatcher) Matches(s string) bool { - if !m.caseSensitive { - s = strings.ToLower(s) - } - - _, ok := m.values[s] - return ok -} - -// anyStringWithoutNewlineMatcher is a stringMatcher which matches any string -// (including an empty one) as far as it doesn't contain any newline character. -type anyStringWithoutNewlineMatcher struct{} - -func (m anyStringWithoutNewlineMatcher) Matches(s string) bool { - // We need to make sure it doesn't contain a newline. Since the newline is - // an ASCII character, we can use strings.IndexByte(). 
- return strings.IndexByte(s, '\n') == -1 -} - -// anyNonEmptyStringMatcher is a stringMatcher which matches any non-empty string. -type anyNonEmptyStringMatcher struct { - matchNL bool -} - -func (m *anyNonEmptyStringMatcher) Matches(s string) bool { - if m.matchNL { - // It's OK if the string contains a newline so we just need to make - // sure it's non-empty. - return len(s) > 0 - } - - // We need to make sure it non-empty and doesn't contain a newline. - // Since the newline is an ASCII character, we can use strings.IndexByte(). - return len(s) > 0 && strings.IndexByte(s, '\n') == -1 -} - -// zeroOrOneCharacterStringMatcher is a StringMatcher which matches zero or one occurrence -// of any character. The newline character is matches only if matchNL is set to true. -type zeroOrOneCharacterStringMatcher struct { - matchNL bool -} - -func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool { - // If there's more than one rune in the string, then it can't match. - if r, size := utf8.DecodeRuneInString(s); r == utf8.RuneError { - // Size is 0 for empty strings, 1 for invalid rune. - // Empty string matches, invalid rune matches if there isn't anything else. - return size == len(s) - } else if size < len(s) { - return false - } - - // No need to check for the newline if the string is empty or matching a newline is OK. - if m.matchNL || len(s) == 0 { - return true - } - - return s[0] != '\n' -} - -// trueMatcher is a stringMatcher which matches any string (always returns true). -type trueMatcher struct{} - -func (m trueMatcher) Matches(_ string) bool { - return true -} - -// optimizeEqualStringMatchers optimize a specific case where all matchers are made by an -// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher). In -// this specific case, when we have many strings to match against we can use a map instead -// of iterating over the list of strings. -func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatcher { - var ( - caseSensitive bool - caseSensitiveSet bool - numValues int - ) - - // Analyse the input StringMatcher to count the number of occurrences - // and ensure all of them have the same case sensitivity. - analyseCallback := func(matcher *equalStringMatcher) bool { - // Ensure we don't have mixed case sensitivity. - if caseSensitiveSet && caseSensitive != matcher.caseSensitive { - return false - } else if !caseSensitiveSet { - caseSensitive = matcher.caseSensitive - caseSensitiveSet = true - } - - numValues++ - return true - } - - if !findEqualStringMatchers(input, analyseCallback) { - return input - } - - // If the number of values found is less than the threshold, then we should skip the optimization. - if numValues < threshold { - return input - } - - // Parse again the input StringMatcher to extract all values and storing them. - // We can skip the case sensitivity check because we've already checked it and - // if the code reach this point then it means all matchers have the same case sensitivity. - multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues) - - // Ignore the return value because we already iterated over the input StringMatcher - // and it was all good. - findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool { - multiMatcher.add(matcher.s) - return true - }) - - return multiMatcher -} - -// findEqualStringMatchers analyze the input StringMatcher and calls the callback for each -// equalStringMatcher found. 
Returns true if and only if the input StringMatcher is *only* -// composed by an alternation of equalStringMatcher. -func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalStringMatcher) bool) bool { - orInput, ok := input.(orStringMatcher) - if !ok { - return false - } - - for _, m := range orInput { - switch casted := m.(type) { - case orStringMatcher: - if !findEqualStringMatchers(m, callback) { - return false - } - - case *equalStringMatcher: - if !callback(casted) { - return false - } - - default: - // It's not an equal string matcher, so we have to stop searching - // cause this optimization can't be applied. - return false - } - } - - return true -} - -func hasPrefixCaseInsensitive(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) -} - -func hasSuffixCaseInsensitive(s, suffix string) bool { - return len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix) -} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding.go deleted file mode 100644 index 5e3e89fb..00000000 --- a/vendor/github.com/prometheus/prometheus/model/labels/sharding.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !stringlabels && !dedupelabels - -package labels - -import ( - "github.com/cespare/xxhash/v2" -) - -// StableHash is a labels hashing implementation which is guaranteed to not change over time. -// This function should be used whenever labels hashing backward compatibility must be guaranteed. -func StableHash(ls Labels) uint64 { - // Use xxhash.Sum64(b) for fast path as it's faster. - b := make([]byte, 0, 1024) - for i, v := range ls { - if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { - // If labels entry is 1KB+ do not allocate whole entry. - h := xxhash.New() - _, _ = h.Write(b) - for _, v := range ls[i:] { - _, _ = h.WriteString(v.Name) - _, _ = h.Write(seps) - _, _ = h.WriteString(v.Value) - _, _ = h.Write(seps) - } - return h.Sum64() - } - - b = append(b, v.Name...) - b = append(b, seps[0]) - b = append(b, v.Value...) - b = append(b, seps[0]) - } - return xxhash.Sum64(b) -} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go deleted file mode 100644 index 5912724f..00000000 --- a/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build dedupelabels - -package labels - -import ( - "github.com/cespare/xxhash/v2" -) - -// StableHash is a labels hashing implementation which is guaranteed to not change over time. -// This function should be used whenever labels hashing backward compatibility must be guaranteed. -func StableHash(ls Labels) uint64 { - // Use xxhash.Sum64(b) for fast path as it's faster. - b := make([]byte, 0, 1024) - for pos := 0; pos < len(ls.data); { - name, newPos := decodeString(ls.syms, ls.data, pos) - value, newPos := decodeString(ls.syms, ls.data, newPos) - if len(b)+len(name)+len(value)+2 >= cap(b) { - // If labels entry is 1KB+, hash the rest of them via Write(). - h := xxhash.New() - _, _ = h.Write(b) - for pos < len(ls.data) { - name, pos = decodeString(ls.syms, ls.data, pos) - value, pos = decodeString(ls.syms, ls.data, pos) - _, _ = h.WriteString(name) - _, _ = h.Write(seps) - _, _ = h.WriteString(value) - _, _ = h.Write(seps) - } - return h.Sum64() - } - - b = append(b, name...) - b = append(b, seps[0]) - b = append(b, value...) - b = append(b, seps[0]) - pos = newPos - } - return xxhash.Sum64(b) -} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go deleted file mode 100644 index 3ad2027d..00000000 --- a/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build stringlabels - -package labels - -import ( - "github.com/cespare/xxhash/v2" -) - -// StableHash is a labels hashing implementation which is guaranteed to not change over time. -// This function should be used whenever labels hashing backward compatibility must be guaranteed. -func StableHash(ls Labels) uint64 { - // Use xxhash.Sum64(b) for fast path as it's faster. - b := make([]byte, 0, 1024) - var h *xxhash.Digest - for i := 0; i < len(ls.data); { - var v Label - v.Name, i = decodeString(ls.data, i) - v.Value, i = decodeString(ls.data, i) - if h == nil && len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { - // If labels entry is 1KB+, switch to Write API. Copy in the values up to this point. - h = xxhash.New() - _, _ = h.Write(b) - } - if h != nil { - _, _ = h.WriteString(v.Name) - _, _ = h.Write(seps) - _, _ = h.WriteString(v.Value) - _, _ = h.Write(seps) - continue - } - - b = append(b, v.Name...) - b = append(b, seps[0]) - b = append(b, v.Value...) 
- b = append(b, seps[0]) - } - if h != nil { - return h.Sum64() - } - return xxhash.Sum64(b) -} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go deleted file mode 100644 index d060def4..00000000 --- a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package labels - -import ( - "bufio" - "fmt" - "os" - "strings" -) - -// Slice is a sortable slice of label sets. -type Slice []Labels - -func (s Slice) Len() int { return len(s) } -func (s Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s Slice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 } - -// Selector holds constraints for matching against a label set. -type Selector []*Matcher - -// Matches returns whether the labels satisfy all matchers. -func (s Selector) Matches(labels Labels) bool { - for _, m := range s { - if v := labels.Get(m.Name); !m.Matches(v) { - return false - } - } - return true -} - -// ReadLabels reads up to n label sets in a JSON formatted file fn. It is mostly useful -// to load testing data. -func ReadLabels(fn string, n int) ([]Labels, error) { - f, err := os.Open(fn) - if err != nil { - return nil, err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - b := NewScratchBuilder(0) - - var mets []Labels - hashes := map[uint64]struct{}{} - i := 0 - - for scanner.Scan() && i < n { - b.Reset() - - r := strings.NewReplacer("\"", "", "{", "", "}", "") - s := r.Replace(scanner.Text()) - - labelChunks := strings.Split(s, ",") - for _, labelChunk := range labelChunks { - split := strings.Split(labelChunk, ":") - b.Add(split[0], split[1]) - } - // Order of the k/v labels matters, don't assume we'll always receive them already sorted. - b.Sort() - m := b.Labels() - - h := m.Hash() - if _, ok := hashes[h]; ok { - continue - } - mets = append(mets, m) - hashes[h] = struct{}{} - i++ - } - - if i != n { - return mets, fmt.Errorf("requested %d metrics but found %d", n, i) - } - return mets, nil -} diff --git a/vendor/github.com/prometheus/prometheus/prompb/README.md b/vendor/github.com/prometheus/prometheus/prompb/README.md deleted file mode 100644 index a33d7bfb..00000000 --- a/vendor/github.com/prometheus/prometheus/prompb/README.md +++ /dev/null @@ -1,9 +0,0 @@ -The compiled protobufs are version controlled and you won't normally need to -re-compile them when building Prometheus. - -If however you have modified the defs and do need to re-compile, run -`make proto` from the parent dir. - -In order for the [script](../scripts/genproto.sh) to run, you'll need `protoc` (version 3.15.8) in -your PATH. 
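
The three StableHash implementations deleted above (sharding.go and its dedupelabels/stringlabels build-tag variants) all follow the same pattern: encode label name/value pairs into a preallocated 1 KiB buffer and hash it in one shot with xxhash.Sum64, switching to xxhash's streaming Digest API only when the encoded labels would overflow that buffer. The following is a minimal, self-contained sketch of that pattern, not the package's own code: the Label type, the stableHash name, and the 0xff separator byte are assumptions introduced for illustration (the real code uses the labels package's Labels type and its package-level seps value).

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// Label is a stand-in for the labels package's name/value pair type.
type Label struct{ Name, Value string }

// seps is assumed to be a single 0xff byte, mirroring the separator used
// elsewhere in the labels package.
var seps = []byte{'\xff'}

// stableHash hashes label pairs in a way that does not depend on buffer size:
// small label sets take the Sum64 fast path, large ones fall back to the
// streaming Digest API, and both produce the same result.
func stableHash(ls []Label) uint64 {
	b := make([]byte, 0, 1024)
	for i, v := range ls {
		if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) {
			// The encoded labels would exceed the buffer: hash what we have,
			// then stream the remaining pairs through the Digest.
			h := xxhash.New()
			_, _ = h.Write(b)
			for _, v := range ls[i:] {
				_, _ = h.WriteString(v.Name)
				_, _ = h.Write(seps)
				_, _ = h.WriteString(v.Value)
				_, _ = h.Write(seps)
			}
			return h.Sum64()
		}
		b = append(b, v.Name...)
		b = append(b, seps[0])
		b = append(b, v.Value...)
		b = append(b, seps[0])
	}
	return xxhash.Sum64(b)
}

func main() {
	fmt.Printf("%x\n", stableHash([]Label{{"job", "api"}, {"instance", "a:9090"}}))
}

The two-phase design keeps the common case (a handful of short labels) allocation-light while staying byte-for-byte compatible with the streaming path for large label sets, which is why the hash is stable across both branches.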
- diff --git a/vendor/github.com/prometheus/prometheus/prompb/buf.lock b/vendor/github.com/prometheus/prometheus/prompb/buf.lock deleted file mode 100644 index 30b0f084..00000000 --- a/vendor/github.com/prometheus/prometheus/prompb/buf.lock +++ /dev/null @@ -1,10 +0,0 @@ -# Generated by buf. DO NOT EDIT. -version: v1 -deps: - - remote: buf.build - owner: gogo - repository: protobuf - branch: main - commit: 4df00b267f944190a229ce3695781e99 - digest: b1-sjLgsg7CzrkOrIjBDh3s-l0aMjE6oqTj85-OsoopKAw= - create_time: 2021-08-10T00:14:28.345069Z diff --git a/vendor/github.com/prometheus/prometheus/prompb/buf.yaml b/vendor/github.com/prometheus/prometheus/prompb/buf.yaml deleted file mode 100644 index 0f7ec134..00000000 --- a/vendor/github.com/prometheus/prometheus/prompb/buf.yaml +++ /dev/null @@ -1,21 +0,0 @@ -version: v1 -name: buf.build/prometheus/prometheus -lint: - ignore_only: - ENUM_VALUE_PREFIX: - - remote.proto - - types.proto - - io/prometheus/client/metrics.proto - ENUM_ZERO_VALUE_SUFFIX: - - remote.proto - - types.proto - - io/prometheus/client/metrics.proto - PACKAGE_DIRECTORY_MATCH: - - remote.proto - - types.proto - PACKAGE_VERSION_SUFFIX: - - remote.proto - - types.proto - - io/prometheus/client/metrics.proto -deps: - - buf.build/gogo/protobuf diff --git a/vendor/github.com/prometheus/prometheus/prompb/custom.go b/vendor/github.com/prometheus/prometheus/prompb/custom.go deleted file mode 100644 index 13d6e0f0..00000000 --- a/vendor/github.com/prometheus/prometheus/prompb/custom.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prompb - -import ( - "sync" -) - -func (m Sample) T() int64 { return m.Timestamp } -func (m Sample) V() float64 { return m.Value } - -func (h Histogram) IsFloatHistogram() bool { - _, ok := h.GetCount().(*Histogram_CountFloat) - return ok -} - -func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { - size := r.Size() - data, ok := p.Get().(*[]byte) - if ok && cap(*data) >= size { - n, err := r.MarshalToSizedBuffer((*data)[:size]) - if err != nil { - return nil, err - } - return (*data)[:n], nil - } - return r.Marshal() -} diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go deleted file mode 100644 index 19318878..00000000 --- a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go +++ /dev/null @@ -1,1702 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: remote.proto - -package prompb - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ReadRequest_ResponseType int32 - -const ( - // Server will return a single ReadResponse message with matched series that includes list of raw samples. - // It's recommended to use streamed response types instead. - // - // Response headers: - // Content-Type: "application/x-protobuf" - // Content-Encoding: "snappy" - ReadRequest_SAMPLES ReadRequest_ResponseType = 0 - // Server will stream a delimited ChunkedReadResponse message that - // contains XOR or HISTOGRAM(!) encoded chunks for a single series. - // Each message is following varint size and fixed size bigendian - // uint32 for CRC32 Castagnoli checksum. - // - // Response headers: - // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" - // Content-Encoding: "" - ReadRequest_STREAMED_XOR_CHUNKS ReadRequest_ResponseType = 1 -) - -var ReadRequest_ResponseType_name = map[int32]string{ - 0: "SAMPLES", - 1: "STREAMED_XOR_CHUNKS", -} - -var ReadRequest_ResponseType_value = map[string]int32{ - "SAMPLES": 0, - "STREAMED_XOR_CHUNKS": 1, -} - -func (x ReadRequest_ResponseType) String() string { - return proto.EnumName(ReadRequest_ResponseType_name, int32(x)) -} - -func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_eefc82927d57d89b, []int{1, 0} -} - -type WriteRequest struct { - Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` - Metadata []MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WriteRequest) Reset() { *m = WriteRequest{} } -func (m *WriteRequest) String() string { return proto.CompactTextString(m) } -func (*WriteRequest) ProtoMessage() {} -func (*WriteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_eefc82927d57d89b, []int{0} -} -func (m *WriteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteRequest.Merge(m, src) -} -func (m *WriteRequest) XXX_Size() int { - return m.Size() -} -func (m *WriteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WriteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteRequest proto.InternalMessageInfo - -func (m *WriteRequest) GetTimeseries() []TimeSeries { - if m != nil { - return m.Timeseries - } - return nil -} - -func (m *WriteRequest) GetMetadata() []MetricMetadata { - if m != nil { - return m.Metadata - } - return nil -} - -// ReadRequest represents a remote read request. -type ReadRequest struct { - Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` - // accepted_response_types allows negotiating the content type of the response. - // - // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is - // implemented by server, error is returned. 
- // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used. - AcceptedResponseTypes []ReadRequest_ResponseType `protobuf:"varint,2,rep,packed,name=accepted_response_types,json=acceptedResponseTypes,proto3,enum=prometheus.ReadRequest_ResponseType" json:"accepted_response_types,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadRequest) Reset() { *m = ReadRequest{} } -func (m *ReadRequest) String() string { return proto.CompactTextString(m) } -func (*ReadRequest) ProtoMessage() {} -func (*ReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_eefc82927d57d89b, []int{1} -} -func (m *ReadRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadRequest.Merge(m, src) -} -func (m *ReadRequest) XXX_Size() int { - return m.Size() -} -func (m *ReadRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReadRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadRequest proto.InternalMessageInfo - -func (m *ReadRequest) GetQueries() []*Query { - if m != nil { - return m.Queries - } - return nil -} - -func (m *ReadRequest) GetAcceptedResponseTypes() []ReadRequest_ResponseType { - if m != nil { - return m.AcceptedResponseTypes - } - return nil -} - -// ReadResponse is a response when response_type equals SAMPLES. -type ReadResponse struct { - // In same order as the request's queries. 
- Results []*QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadResponse) Reset() { *m = ReadResponse{} } -func (m *ReadResponse) String() string { return proto.CompactTextString(m) } -func (*ReadResponse) ProtoMessage() {} -func (*ReadResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_eefc82927d57d89b, []int{2} -} -func (m *ReadResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadResponse.Merge(m, src) -} -func (m *ReadResponse) XXX_Size() int { - return m.Size() -} -func (m *ReadResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadResponse proto.InternalMessageInfo - -func (m *ReadResponse) GetResults() []*QueryResult { - if m != nil { - return m.Results - } - return nil -} - -type Query struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` - Hints *ReadHints `protobuf:"bytes,4,opt,name=hints,proto3" json:"hints,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} -func (*Query) Descriptor() ([]byte, []int) { - return fileDescriptor_eefc82927d57d89b, []int{3} -} -func (m *Query) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Query.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Query) XXX_Merge(src proto.Message) { - xxx_messageInfo_Query.Merge(m, src) -} -func (m *Query) XXX_Size() int { - return m.Size() -} -func (m *Query) XXX_DiscardUnknown() { - xxx_messageInfo_Query.DiscardUnknown(m) -} - -var xxx_messageInfo_Query proto.InternalMessageInfo - -func (m *Query) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *Query) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func (m *Query) GetMatchers() []*LabelMatcher { - if m != nil { - return m.Matchers - } - return nil -} - -func (m *Query) GetHints() *ReadHints { - if m != nil { - return m.Hints - } - return nil -} - -type QueryResult struct { - // Samples within a time series must be ordered by time. 
- Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryResult) Reset() { *m = QueryResult{} } -func (m *QueryResult) String() string { return proto.CompactTextString(m) } -func (*QueryResult) ProtoMessage() {} -func (*QueryResult) Descriptor() ([]byte, []int) { - return fileDescriptor_eefc82927d57d89b, []int{4} -} -func (m *QueryResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryResult.Merge(m, src) -} -func (m *QueryResult) XXX_Size() int { - return m.Size() -} -func (m *QueryResult) XXX_DiscardUnknown() { - xxx_messageInfo_QueryResult.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryResult proto.InternalMessageInfo - -func (m *QueryResult) GetTimeseries() []*TimeSeries { - if m != nil { - return m.Timeseries - } - return nil -} - -// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS. -// We strictly stream full series after series, optionally split by time. This means that a single frame can contain -// partition of the single series, but once a new series is started to be streamed it means that no more chunks will -// be sent for previous one. Series are returned sorted in the same way TSDB block are internally. -type ChunkedReadResponse struct { - ChunkedSeries []*ChunkedSeries `protobuf:"bytes,1,rep,name=chunked_series,json=chunkedSeries,proto3" json:"chunked_series,omitempty"` - // query_index represents an index of the query from ReadRequest.queries these chunks relates to. 
- QueryIndex int64 `protobuf:"varint,2,opt,name=query_index,json=queryIndex,proto3" json:"query_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChunkedReadResponse) Reset() { *m = ChunkedReadResponse{} } -func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) } -func (*ChunkedReadResponse) ProtoMessage() {} -func (*ChunkedReadResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_eefc82927d57d89b, []int{5} -} -func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChunkedReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChunkedReadResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChunkedReadResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChunkedReadResponse.Merge(m, src) -} -func (m *ChunkedReadResponse) XXX_Size() int { - return m.Size() -} -func (m *ChunkedReadResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ChunkedReadResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ChunkedReadResponse proto.InternalMessageInfo - -func (m *ChunkedReadResponse) GetChunkedSeries() []*ChunkedSeries { - if m != nil { - return m.ChunkedSeries - } - return nil -} - -func (m *ChunkedReadResponse) GetQueryIndex() int64 { - if m != nil { - return m.QueryIndex - } - return 0 -} - -func init() { - proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value) - proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest") - proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest") - proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse") - proto.RegisterType((*Query)(nil), "prometheus.Query") - proto.RegisterType((*QueryResult)(nil), "prometheus.QueryResult") - proto.RegisterType((*ChunkedReadResponse)(nil), "prometheus.ChunkedReadResponse") -} - -func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) } - -var fileDescriptor_eefc82927d57d89b = []byte{ - // 496 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xee, 0x26, 0x69, 0x13, 0x8d, 0x43, 0x14, 0xb6, 0x2d, 0x09, 0x39, 0xa4, 0x91, 0xc5, 0x21, - 0x52, 0x51, 0x10, 0xa1, 0xe2, 0xd4, 0x03, 0x69, 0x89, 0x54, 0xa0, 0xe6, 0x67, 0x13, 0x04, 0x42, - 0x48, 0xd6, 0xc6, 0x1e, 0x35, 0x16, 0xf5, 0x4f, 0x77, 0xd7, 0x52, 0xf3, 0x16, 0x3c, 0x13, 0xa7, - 0x9e, 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0x79, 0x6d, 0x87, 0x2d, 0x5c, 0xb8, 0xad, 0xbf, 0x3f, - 0xcf, 0xcc, 0xce, 0x42, 0x53, 0x60, 0x18, 0x2b, 0x1c, 0x25, 0x22, 0x56, 0x31, 0x85, 0x44, 0xc4, - 0x21, 0xaa, 0x25, 0xa6, 0xb2, 0x67, 0xa9, 0x55, 0x82, 0x32, 0x27, 0x7a, 0x7b, 0x17, 0xf1, 0x45, - 0xac, 0x8f, 0x8f, 0xb2, 0x53, 0x8e, 0xda, 0x5f, 0x09, 0x34, 0x3f, 0x88, 0x40, 0x21, 0xc3, 0xab, - 0x14, 0xa5, 0xa2, 0xc7, 0x00, 0x2a, 0x08, 0x51, 0xa2, 0x08, 0x50, 0x76, 0xc9, 0xa0, 0x3a, 0xb4, - 0xc6, 0xf7, 0x46, 0x7f, 0x42, 0x47, 0xf3, 0x20, 0xc4, 0x99, 0x66, 0x4f, 0x6a, 0x37, 0x3f, 0x0f, - 0xb6, 0x98, 0xa1, 0xa7, 0xc7, 0xd0, 0x08, 0x51, 0x71, 0x9f, 0x2b, 0xde, 0xad, 0x6a, 0x6f, 0xcf, - 0xf4, 0x3a, 0xa8, 0x44, 0xe0, 0x39, 0x85, 0xa2, 0xf0, 0x6f, 0x1c, 0x2f, 0x6b, 0x8d, 0x4a, 0xbb, - 0x6a, 0x7f, 0x27, 0x60, 0x31, 0xe4, 0x7e, 0x59, 0xd1, 0x21, 
0xd4, 0xaf, 0x52, 0xb3, 0x9c, 0xbb, - 0x66, 0xe4, 0xbb, 0x14, 0xc5, 0x8a, 0x95, 0x0a, 0xfa, 0x19, 0x3a, 0xdc, 0xf3, 0x30, 0x51, 0xe8, - 0xbb, 0x02, 0x65, 0x12, 0x47, 0x12, 0x5d, 0x3d, 0x86, 0x6e, 0x65, 0x50, 0x1d, 0xb6, 0xc6, 0x0f, - 0x4c, 0xb3, 0xf1, 0x9b, 0x11, 0x2b, 0xd4, 0xf3, 0x55, 0x82, 0x6c, 0xbf, 0x0c, 0x31, 0x51, 0x69, - 0x1f, 0x41, 0xd3, 0x04, 0xa8, 0x05, 0xf5, 0xd9, 0xc4, 0x79, 0x7b, 0x3e, 0x9d, 0xb5, 0xb7, 0x68, - 0x07, 0x76, 0x67, 0x73, 0x36, 0x9d, 0x38, 0xd3, 0xe7, 0xee, 0xc7, 0x37, 0xcc, 0x3d, 0x3d, 0x7b, - 0xff, 0xfa, 0xd5, 0xac, 0x4d, 0xec, 0x49, 0xe6, 0xe2, 0x9b, 0x28, 0xfa, 0x18, 0xea, 0x02, 0x65, - 0x7a, 0xa9, 0xca, 0x86, 0x3a, 0xff, 0x36, 0xa4, 0x79, 0x56, 0xea, 0xec, 0x6f, 0x04, 0xb6, 0x35, - 0x41, 0x1f, 0x02, 0x95, 0x8a, 0x0b, 0xe5, 0xea, 0xa9, 0x2b, 0x1e, 0x26, 0x6e, 0x98, 0xe5, 0x90, - 0x61, 0x95, 0xb5, 0x35, 0x33, 0x2f, 0x09, 0x47, 0xd2, 0x21, 0xb4, 0x31, 0xf2, 0x6f, 0x6b, 0x2b, - 0x5a, 0xdb, 0xc2, 0xc8, 0x37, 0x95, 0x47, 0xd0, 0x08, 0xb9, 0xf2, 0x96, 0x28, 0x64, 0x71, 0x73, - 0x5d, 0xb3, 0xaa, 0x73, 0xbe, 0xc0, 0x4b, 0x27, 0x17, 0xb0, 0x8d, 0x92, 0x1e, 0xc2, 0xf6, 0x32, - 0x88, 0x94, 0xec, 0xd6, 0x06, 0x64, 0x68, 0x8d, 0xf7, 0xff, 0x1e, 0xee, 0x59, 0x46, 0xb2, 0x5c, - 0x63, 0x4f, 0xc1, 0x32, 0x9a, 0xa3, 0x4f, 0xff, 0x7f, 0xd3, 0xcc, 0x1d, 0xb3, 0xaf, 0x61, 0xf7, - 0x74, 0x99, 0x46, 0x5f, 0xb2, 0xcb, 0x31, 0xa6, 0xfa, 0x0c, 0x5a, 0x5e, 0x0e, 0xbb, 0xb7, 0x22, - 0xef, 0x9b, 0x91, 0x85, 0xb1, 0x48, 0xbd, 0xe3, 0x99, 0x9f, 0xf4, 0x00, 0xac, 0x6c, 0x8d, 0x56, - 0x6e, 0x10, 0xf9, 0x78, 0x5d, 0xcc, 0x09, 0x34, 0xf4, 0x22, 0x43, 0x4e, 0xf6, 0x6e, 0xd6, 0x7d, - 0xf2, 0x63, 0xdd, 0x27, 0xbf, 0xd6, 0x7d, 0xf2, 0x69, 0x27, 0xcb, 0x4d, 0x16, 0x8b, 0x1d, 0xfd, - 0x92, 0x9e, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x13, 0x18, 0x12, 0x0a, 0x88, 0x03, 0x00, 0x00, -} - -func (m *WriteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Metadata) > 0 { - for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRemote(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Timeseries) > 0 { - for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRemote(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ReadRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - if len(m.AcceptedResponseTypes) > 0 { - dAtA2 := make([]byte, len(m.AcceptedResponseTypes)*10) - var j1 int - for _, num := range m.AcceptedResponseTypes { - for num >= 1<<7 { - dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j1++ - } - dAtA2[j1] = uint8(num) - j1++ - } - i -= j1 - copy(dAtA[i:], dAtA2[:j1]) - i = encodeVarintRemote(dAtA, i, uint64(j1)) - i-- - dAtA[i] = 0x12 - } - if len(m.Queries) > 0 { - for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Queries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRemote(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ReadResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRemote(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Query) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Query) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Hints != nil { - { - size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRemote(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Matchers) > 0 { - for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRemote(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.EndTimestampMs != 0 { - i = encodeVarintRemote(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintRemote(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *QueryResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Timeseries) > 0 { - 
for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRemote(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ChunkedReadResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChunkedReadResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChunkedReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.QueryIndex != 0 { - i = encodeVarintRemote(dAtA, i, uint64(m.QueryIndex)) - i-- - dAtA[i] = 0x10 - } - if len(m.ChunkedSeries) > 0 { - for iNdEx := len(m.ChunkedSeries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChunkedSeries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRemote(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintRemote(dAtA []byte, offset int, v uint64) int { - offset -= sovRemote(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *WriteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovRemote(uint64(l)) - } - } - if len(m.Metadata) > 0 { - for _, e := range m.Metadata { - l = e.Size() - n += 1 + l + sovRemote(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Queries) > 0 { - for _, e := range m.Queries { - l = e.Size() - n += 1 + l + sovRemote(uint64(l)) - } - } - if len(m.AcceptedResponseTypes) > 0 { - l = 0 - for _, e := range m.AcceptedResponseTypes { - l += sovRemote(uint64(e)) - } - n += 1 + sovRemote(uint64(l)) + l - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Results) > 0 { - for _, e := range m.Results { - l = e.Size() - n += 1 + l + sovRemote(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Query) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovRemote(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovRemote(uint64(m.EndTimestampMs)) - } - if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovRemote(uint64(l)) - } - } - if m.Hints != nil { - l = m.Hints.Size() - n += 1 + l + sovRemote(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *QueryResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovRemote(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - 
return n -} - -func (m *ChunkedReadResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ChunkedSeries) > 0 { - for _, e := range m.ChunkedSeries { - l = e.Size() - n += 1 + l + sovRemote(uint64(l)) - } - } - if m.QueryIndex != 0 { - n += 1 + sovRemote(uint64(m.QueryIndex)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRemote(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRemote(x uint64) (n int) { - return sovRemote(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *WriteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRemote - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRemote - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, TimeSeries{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRemote - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRemote - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metadata = append(m.Metadata, MetricMetadata{}) - if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRemote(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRemote - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRemote - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRemote - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Queries = append(m.Queries, &Query{}) - if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType == 0 { - var v ReadRequest_ResponseType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= ReadRequest_ResponseType(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthRemote - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthRemote - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - if elementCount != 0 && len(m.AcceptedResponseTypes) == 0 { - m.AcceptedResponseTypes = make([]ReadRequest_ResponseType, 0, elementCount) - } - for iNdEx < postIndex { - var v ReadRequest_ResponseType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= ReadRequest_ResponseType(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field AcceptedResponseTypes", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipRemote(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRemote - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRemote - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRemote - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Results = append(m.Results, &QueryResult{}) - if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRemote(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRemote - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Query) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Query: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Query: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRemote - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRemote - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Matchers = append(m.Matchers, &LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRemote - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRemote - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Hints == nil { - m.Hints = &ReadHints{} - } - if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRemote(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRemote - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRemote - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRemote - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, &TimeSeries{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRemote(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRemote - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChunkedReadResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChunkedReadResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChunkedReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChunkedSeries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRemote - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRemote - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChunkedSeries = append(m.ChunkedSeries, &ChunkedSeries{}) - if err := m.ChunkedSeries[len(m.ChunkedSeries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QueryIndex", wireType) - } - m.QueryIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRemote - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.QueryIndex |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRemote(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRemote - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRemote(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRemote - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRemote - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRemote - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthRemote - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupRemote - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthRemote - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthRemote = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRemote = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupRemote = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.proto b/vendor/github.com/prometheus/prometheus/prompb/remote.proto deleted file mode 100644 index b4f82f5f..00000000 --- a/vendor/github.com/prometheus/prometheus/prompb/remote.proto +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2016 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; -package prometheus; - -option go_package = "prompb"; - -import "types.proto"; -import "gogoproto/gogo.proto"; - -message WriteRequest { - repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; - // Cortex uses this field to determine the source of the write request. - // We reserve it to avoid any compatibility issues. - reserved 2; - repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false]; -} - -// ReadRequest represents a remote read request. -message ReadRequest { - repeated Query queries = 1; - - enum ResponseType { - // Server will return a single ReadResponse message with matched series that includes list of raw samples. - // It's recommended to use streamed response types instead. 
- // - // Response headers: - // Content-Type: "application/x-protobuf" - // Content-Encoding: "snappy" - SAMPLES = 0; - // Server will stream a delimited ChunkedReadResponse message that - // contains XOR or HISTOGRAM(!) encoded chunks for a single series. - // Each message is following varint size and fixed size bigendian - // uint32 for CRC32 Castagnoli checksum. - // - // Response headers: - // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" - // Content-Encoding: "" - STREAMED_XOR_CHUNKS = 1; - } - - // accepted_response_types allows negotiating the content type of the response. - // - // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is - // implemented by server, error is returned. - // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used. - repeated ResponseType accepted_response_types = 2; -} - -// ReadResponse is a response when response_type equals SAMPLES. -message ReadResponse { - // In same order as the request's queries. - repeated QueryResult results = 1; -} - -message Query { - int64 start_timestamp_ms = 1; - int64 end_timestamp_ms = 2; - repeated prometheus.LabelMatcher matchers = 3; - prometheus.ReadHints hints = 4; -} - -message QueryResult { - // Samples within a time series must be ordered by time. - repeated prometheus.TimeSeries timeseries = 1; -} - -// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS. -// We strictly stream full series after series, optionally split by time. This means that a single frame can contain -// partition of the single series, but once a new series is started to be streamed it means that no more chunks will -// be sent for previous one. Series are returned sorted in the same way TSDB block are internally. -message ChunkedReadResponse { - repeated prometheus.ChunkedSeries chunked_series = 1; - - // query_index represents an index of the query from ReadRequest.queries these chunks relates to. - int64 query_index = 2; -} diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go deleted file mode 100644 index 93883daa..00000000 --- a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go +++ /dev/null @@ -1,4440 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: types.proto - -package prompb - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type MetricMetadata_MetricType int32 - -const ( - MetricMetadata_UNKNOWN MetricMetadata_MetricType = 0 - MetricMetadata_COUNTER MetricMetadata_MetricType = 1 - MetricMetadata_GAUGE MetricMetadata_MetricType = 2 - MetricMetadata_HISTOGRAM MetricMetadata_MetricType = 3 - MetricMetadata_GAUGEHISTOGRAM MetricMetadata_MetricType = 4 - MetricMetadata_SUMMARY MetricMetadata_MetricType = 5 - MetricMetadata_INFO MetricMetadata_MetricType = 6 - MetricMetadata_STATESET MetricMetadata_MetricType = 7 -) - -var MetricMetadata_MetricType_name = map[int32]string{ - 0: "UNKNOWN", - 1: "COUNTER", - 2: "GAUGE", - 3: "HISTOGRAM", - 4: "GAUGEHISTOGRAM", - 5: "SUMMARY", - 6: "INFO", - 7: "STATESET", -} - -var MetricMetadata_MetricType_value = map[string]int32{ - "UNKNOWN": 0, - "COUNTER": 1, - "GAUGE": 2, - "HISTOGRAM": 3, - "GAUGEHISTOGRAM": 4, - "SUMMARY": 5, - "INFO": 6, - "STATESET": 7, -} - -func (x MetricMetadata_MetricType) String() string { - return proto.EnumName(MetricMetadata_MetricType_name, int32(x)) -} - -func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{0, 0} -} - -type Histogram_ResetHint int32 - -const ( - Histogram_UNKNOWN Histogram_ResetHint = 0 - Histogram_YES Histogram_ResetHint = 1 - Histogram_NO Histogram_ResetHint = 2 - Histogram_GAUGE Histogram_ResetHint = 3 -) - -var Histogram_ResetHint_name = map[int32]string{ - 0: "UNKNOWN", - 1: "YES", - 2: "NO", - 3: "GAUGE", -} - -var Histogram_ResetHint_value = map[string]int32{ - "UNKNOWN": 0, - "YES": 1, - "NO": 2, - "GAUGE": 3, -} - -func (x Histogram_ResetHint) String() string { - return proto.EnumName(Histogram_ResetHint_name, int32(x)) -} - -func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{3, 0} -} - -type LabelMatcher_Type int32 - -const ( - LabelMatcher_EQ LabelMatcher_Type = 0 - LabelMatcher_NEQ LabelMatcher_Type = 1 - LabelMatcher_RE LabelMatcher_Type = 2 - LabelMatcher_NRE LabelMatcher_Type = 3 -) - -var LabelMatcher_Type_name = map[int32]string{ - 0: "EQ", - 1: "NEQ", - 2: "RE", - 3: "NRE", -} - -var LabelMatcher_Type_value = map[string]int32{ - "EQ": 0, - "NEQ": 1, - "RE": 2, - "NRE": 3, -} - -func (x LabelMatcher_Type) String() string { - return proto.EnumName(LabelMatcher_Type_name, int32(x)) -} - -func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{8, 0} -} - -// We require this to match chunkenc.Encoding. -type Chunk_Encoding int32 - -const ( - Chunk_UNKNOWN Chunk_Encoding = 0 - Chunk_XOR Chunk_Encoding = 1 - Chunk_HISTOGRAM Chunk_Encoding = 2 - Chunk_FLOAT_HISTOGRAM Chunk_Encoding = 3 -) - -var Chunk_Encoding_name = map[int32]string{ - 0: "UNKNOWN", - 1: "XOR", - 2: "HISTOGRAM", - 3: "FLOAT_HISTOGRAM", -} - -var Chunk_Encoding_value = map[string]int32{ - "UNKNOWN": 0, - "XOR": 1, - "HISTOGRAM": 2, - "FLOAT_HISTOGRAM": 3, -} - -func (x Chunk_Encoding) String() string { - return proto.EnumName(Chunk_Encoding_name, int32(x)) -} - -func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{10, 0} -} - -type MetricMetadata struct { - // Represents the metric type, these match the set from Prometheus. - // Refer to github.com/prometheus/common/model/metadata.go for details. 
- Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.MetricMetadata_MetricType" json:"type,omitempty"` - MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"` - Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` - Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricMetadata) Reset() { *m = MetricMetadata{} } -func (m *MetricMetadata) String() string { return proto.CompactTextString(m) } -func (*MetricMetadata) ProtoMessage() {} -func (*MetricMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{0} -} -func (m *MetricMetadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricMetadata.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricMetadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricMetadata.Merge(m, src) -} -func (m *MetricMetadata) XXX_Size() int { - return m.Size() -} -func (m *MetricMetadata) XXX_DiscardUnknown() { - xxx_messageInfo_MetricMetadata.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricMetadata proto.InternalMessageInfo - -func (m *MetricMetadata) GetType() MetricMetadata_MetricType { - if m != nil { - return m.Type - } - return MetricMetadata_UNKNOWN -} - -func (m *MetricMetadata) GetMetricFamilyName() string { - if m != nil { - return m.MetricFamilyName - } - return "" -} - -func (m *MetricMetadata) GetHelp() string { - if m != nil { - return m.Help - } - return "" -} - -func (m *MetricMetadata) GetUnit() string { - if m != nil { - return m.Unit - } - return "" -} - -type Sample struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - // timestamp is in ms format, see model/timestamp/timestamp.go for - // conversion from time.Time to Prometheus timestamp. 
- Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Sample) Reset() { *m = Sample{} } -func (m *Sample) String() string { return proto.CompactTextString(m) } -func (*Sample) ProtoMessage() {} -func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{1} -} -func (m *Sample) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Sample.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Sample) XXX_Merge(src proto.Message) { - xxx_messageInfo_Sample.Merge(m, src) -} -func (m *Sample) XXX_Size() int { - return m.Size() -} -func (m *Sample) XXX_DiscardUnknown() { - xxx_messageInfo_Sample.DiscardUnknown(m) -} - -var xxx_messageInfo_Sample proto.InternalMessageInfo - -func (m *Sample) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *Sample) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -type Exemplar struct { - // Optional, can be empty. - Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` - // timestamp is in ms format, see model/timestamp/timestamp.go for - // conversion from time.Time to Prometheus timestamp. - Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{2} -} -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) -} -func (m *Exemplar) XXX_Size() int { - return m.Size() -} -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_Exemplar proto.InternalMessageInfo - -func (m *Exemplar) GetLabels() []Label { - if m != nil { - return m.Labels - } - return nil -} - -func (m *Exemplar) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *Exemplar) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -// A native histogram, also known as a sparse histogram. -// Original design doc: -// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit -// The appendix of this design doc also explains the concept of float -// histograms. This Histogram message can represent both, the usual -// integer histogram as well as a float histogram. 
-type Histogram struct { - // Types that are valid to be assigned to Count: - // - // *Histogram_CountInt - // *Histogram_CountFloat - Count isHistogram_Count `protobuf_oneof:"count"` - Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` - // The schema defines the bucket schema. Currently, valid numbers - // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 - // is a bucket boundary in each case, and then each power of two is - // divided into 2^n logarithmic buckets. Or in other words, each - // bucket boundary is the previous boundary times 2^(2^-n). In the - // future, more bucket schemas may be added using numbers < -4 or > - // 8. - Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` - ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` - // Types that are valid to be assigned to ZeroCount: - // - // *Histogram_ZeroCountInt - // *Histogram_ZeroCountFloat - ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` - // Negative Buckets. - NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"` - // Use either "negative_deltas" or "negative_counts", the former for - // regular histograms with integer counts, the latter for float - // histograms. - NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` - NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` - // Positive Buckets. - PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"` - // Use either "positive_deltas" or "positive_counts", the former for - // regular histograms with integer counts, the latter for float - // histograms. - PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` - PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` - ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus.Histogram_ResetHint" json:"reset_hint,omitempty"` - // timestamp is in ms format, see model/timestamp/timestamp.go for - // conversion from time.Time to Prometheus timestamp. 
- Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{3} -} -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return m.Size() -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo - -type isHistogram_Count interface { - isHistogram_Count() - MarshalTo([]byte) (int, error) - Size() int -} -type isHistogram_ZeroCount interface { - isHistogram_ZeroCount() - MarshalTo([]byte) (int, error) - Size() int -} - -type Histogram_CountInt struct { - CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"` -} -type Histogram_CountFloat struct { - CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"` -} -type Histogram_ZeroCountInt struct { - ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"` -} -type Histogram_ZeroCountFloat struct { - ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"` -} - -func (*Histogram_CountInt) isHistogram_Count() {} -func (*Histogram_CountFloat) isHistogram_Count() {} -func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} -func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} - -func (m *Histogram) GetCount() isHistogram_Count { - if m != nil { - return m.Count - } - return nil -} -func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { - if m != nil { - return m.ZeroCount - } - return nil -} - -func (m *Histogram) GetCountInt() uint64 { - if x, ok := m.GetCount().(*Histogram_CountInt); ok { - return x.CountInt - } - return 0 -} - -func (m *Histogram) GetCountFloat() float64 { - if x, ok := m.GetCount().(*Histogram_CountFloat); ok { - return x.CountFloat - } - return 0 -} - -func (m *Histogram) GetSum() float64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *Histogram) GetSchema() int32 { - if m != nil { - return m.Schema - } - return 0 -} - -func (m *Histogram) GetZeroThreshold() float64 { - if m != nil { - return m.ZeroThreshold - } - return 0 -} - -func (m *Histogram) GetZeroCountInt() uint64 { - if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok { - return x.ZeroCountInt - } - return 0 -} - -func (m *Histogram) GetZeroCountFloat() float64 { - if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok { - return x.ZeroCountFloat - } - return 0 -} - -func (m *Histogram) GetNegativeSpans() []BucketSpan { - if m != nil { - return m.NegativeSpans - } - return nil -} - -func (m *Histogram) 
GetNegativeDeltas() []int64 { - if m != nil { - return m.NegativeDeltas - } - return nil -} - -func (m *Histogram) GetNegativeCounts() []float64 { - if m != nil { - return m.NegativeCounts - } - return nil -} - -func (m *Histogram) GetPositiveSpans() []BucketSpan { - if m != nil { - return m.PositiveSpans - } - return nil -} - -func (m *Histogram) GetPositiveDeltas() []int64 { - if m != nil { - return m.PositiveDeltas - } - return nil -} - -func (m *Histogram) GetPositiveCounts() []float64 { - if m != nil { - return m.PositiveCounts - } - return nil -} - -func (m *Histogram) GetResetHint() Histogram_ResetHint { - if m != nil { - return m.ResetHint - } - return Histogram_UNKNOWN -} - -func (m *Histogram) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Histogram) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Histogram_CountInt)(nil), - (*Histogram_CountFloat)(nil), - (*Histogram_ZeroCountInt)(nil), - (*Histogram_ZeroCountFloat)(nil), - } -} - -// A BucketSpan defines a number of consecutive buckets with their -// offset. Logically, it would be more straightforward to include the -// bucket counts in the Span. However, the protobuf representation is -// more compact in the way the data is structured here (with all the -// buckets in a single array separate from the Spans). -type BucketSpan struct { - Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` - Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BucketSpan) Reset() { *m = BucketSpan{} } -func (m *BucketSpan) String() string { return proto.CompactTextString(m) } -func (*BucketSpan) ProtoMessage() {} -func (*BucketSpan) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{4} -} -func (m *BucketSpan) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BucketSpan) XXX_Merge(src proto.Message) { - xxx_messageInfo_BucketSpan.Merge(m, src) -} -func (m *BucketSpan) XXX_Size() int { - return m.Size() -} -func (m *BucketSpan) XXX_DiscardUnknown() { - xxx_messageInfo_BucketSpan.DiscardUnknown(m) -} - -var xxx_messageInfo_BucketSpan proto.InternalMessageInfo - -func (m *BucketSpan) GetOffset() int32 { - if m != nil { - return m.Offset - } - return 0 -} - -func (m *BucketSpan) GetLength() uint32 { - if m != nil { - return m.Length - } - return 0 -} - -// TimeSeries represents samples and labels for a single time series. -type TimeSeries struct { - // For a timeseries to be valid, and for the samples and exemplars - // to be ingested by the remote system properly, the labels field is required. 
- Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` - Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` - Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TimeSeries) Reset() { *m = TimeSeries{} } -func (m *TimeSeries) String() string { return proto.CompactTextString(m) } -func (*TimeSeries) ProtoMessage() {} -func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{5} -} -func (m *TimeSeries) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TimeSeries) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeries.Merge(m, src) -} -func (m *TimeSeries) XXX_Size() int { - return m.Size() -} -func (m *TimeSeries) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeries.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeries proto.InternalMessageInfo - -func (m *TimeSeries) GetLabels() []Label { - if m != nil { - return m.Labels - } - return nil -} - -func (m *TimeSeries) GetSamples() []Sample { - if m != nil { - return m.Samples - } - return nil -} - -func (m *TimeSeries) GetExemplars() []Exemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -func (m *TimeSeries) GetHistograms() []Histogram { - if m != nil { - return m.Histograms - } - return nil -} - -type Label struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Label) Reset() { *m = Label{} } -func (m *Label) String() string { return proto.CompactTextString(m) } -func (*Label) ProtoMessage() {} -func (*Label) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{6} -} -func (m *Label) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Label.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Label) XXX_Merge(src proto.Message) { - xxx_messageInfo_Label.Merge(m, src) -} -func (m *Label) XXX_Size() int { - return m.Size() -} -func (m *Label) XXX_DiscardUnknown() { - xxx_messageInfo_Label.DiscardUnknown(m) -} - -var xxx_messageInfo_Label proto.InternalMessageInfo - -func (m *Label) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Label) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -type Labels struct { - Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Labels) Reset() { *m = Labels{} } -func (m *Labels) String() string { return proto.CompactTextString(m) } 
-func (*Labels) ProtoMessage() {} -func (*Labels) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{7} -} -func (m *Labels) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Labels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Labels.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Labels) XXX_Merge(src proto.Message) { - xxx_messageInfo_Labels.Merge(m, src) -} -func (m *Labels) XXX_Size() int { - return m.Size() -} -func (m *Labels) XXX_DiscardUnknown() { - xxx_messageInfo_Labels.DiscardUnknown(m) -} - -var xxx_messageInfo_Labels proto.InternalMessageInfo - -func (m *Labels) GetLabels() []Label { - if m != nil { - return m.Labels - } - return nil -} - -// Matcher specifies a rule, which can match or set of labels or not. -type LabelMatcher struct { - Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.LabelMatcher_Type" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } -func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } -func (*LabelMatcher) ProtoMessage() {} -func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{8} -} -func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelMatcher) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelMatcher.Merge(m, src) -} -func (m *LabelMatcher) XXX_Size() int { - return m.Size() -} -func (m *LabelMatcher) XXX_DiscardUnknown() { - xxx_messageInfo_LabelMatcher.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo - -func (m *LabelMatcher) GetType() LabelMatcher_Type { - if m != nil { - return m.Type - } - return LabelMatcher_EQ -} - -func (m *LabelMatcher) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *LabelMatcher) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -type ReadHints struct { - StepMs int64 `protobuf:"varint,1,opt,name=step_ms,json=stepMs,proto3" json:"step_ms,omitempty"` - Func string `protobuf:"bytes,2,opt,name=func,proto3" json:"func,omitempty"` - StartMs int64 `protobuf:"varint,3,opt,name=start_ms,json=startMs,proto3" json:"start_ms,omitempty"` - EndMs int64 `protobuf:"varint,4,opt,name=end_ms,json=endMs,proto3" json:"end_ms,omitempty"` - Grouping []string `protobuf:"bytes,5,rep,name=grouping,proto3" json:"grouping,omitempty"` - By bool `protobuf:"varint,6,opt,name=by,proto3" json:"by,omitempty"` - RangeMs int64 `protobuf:"varint,7,opt,name=range_ms,json=rangeMs,proto3" json:"range_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadHints) Reset() { *m = ReadHints{} } -func (m *ReadHints) 
String() string { return proto.CompactTextString(m) } -func (*ReadHints) ProtoMessage() {} -func (*ReadHints) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{9} -} -func (m *ReadHints) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadHints.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadHints) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadHints.Merge(m, src) -} -func (m *ReadHints) XXX_Size() int { - return m.Size() -} -func (m *ReadHints) XXX_DiscardUnknown() { - xxx_messageInfo_ReadHints.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadHints proto.InternalMessageInfo - -func (m *ReadHints) GetStepMs() int64 { - if m != nil { - return m.StepMs - } - return 0 -} - -func (m *ReadHints) GetFunc() string { - if m != nil { - return m.Func - } - return "" -} - -func (m *ReadHints) GetStartMs() int64 { - if m != nil { - return m.StartMs - } - return 0 -} - -func (m *ReadHints) GetEndMs() int64 { - if m != nil { - return m.EndMs - } - return 0 -} - -func (m *ReadHints) GetGrouping() []string { - if m != nil { - return m.Grouping - } - return nil -} - -func (m *ReadHints) GetBy() bool { - if m != nil { - return m.By - } - return false -} - -func (m *ReadHints) GetRangeMs() int64 { - if m != nil { - return m.RangeMs - } - return 0 -} - -// Chunk represents a TSDB chunk. -// Time range [min, max] is inclusive. -type Chunk struct { - MinTimeMs int64 `protobuf:"varint,1,opt,name=min_time_ms,json=minTimeMs,proto3" json:"min_time_ms,omitempty"` - MaxTimeMs int64 `protobuf:"varint,2,opt,name=max_time_ms,json=maxTimeMs,proto3" json:"max_time_ms,omitempty"` - Type Chunk_Encoding `protobuf:"varint,3,opt,name=type,proto3,enum=prometheus.Chunk_Encoding" json:"type,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Chunk) Reset() { *m = Chunk{} } -func (m *Chunk) String() string { return proto.CompactTextString(m) } -func (*Chunk) ProtoMessage() {} -func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{10} -} -func (m *Chunk) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Chunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_Chunk.Merge(m, src) -} -func (m *Chunk) XXX_Size() int { - return m.Size() -} -func (m *Chunk) XXX_DiscardUnknown() { - xxx_messageInfo_Chunk.DiscardUnknown(m) -} - -var xxx_messageInfo_Chunk proto.InternalMessageInfo - -func (m *Chunk) GetMinTimeMs() int64 { - if m != nil { - return m.MinTimeMs - } - return 0 -} - -func (m *Chunk) GetMaxTimeMs() int64 { - if m != nil { - return m.MaxTimeMs - } - return 0 -} - -func (m *Chunk) GetType() Chunk_Encoding { - if m != nil { - return m.Type - } - return Chunk_UNKNOWN -} - -func (m *Chunk) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -// ChunkedSeries represents single, encoded time series. 
-type ChunkedSeries struct { - // Labels should be sorted. - Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - // Chunks will be in start time order and may overlap. - Chunks []Chunk `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} } -func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } -func (*ChunkedSeries) ProtoMessage() {} -func (*ChunkedSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{11} -} -func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChunkedSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChunkedSeries.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChunkedSeries) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChunkedSeries.Merge(m, src) -} -func (m *ChunkedSeries) XXX_Size() int { - return m.Size() -} -func (m *ChunkedSeries) XXX_DiscardUnknown() { - xxx_messageInfo_ChunkedSeries.DiscardUnknown(m) -} - -var xxx_messageInfo_ChunkedSeries proto.InternalMessageInfo - -func (m *ChunkedSeries) GetLabels() []Label { - if m != nil { - return m.Labels - } - return nil -} - -func (m *ChunkedSeries) GetChunks() []Chunk { - if m != nil { - return m.Chunks - } - return nil -} - -func init() { - proto.RegisterEnum("prometheus.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value) - proto.RegisterEnum("prometheus.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) - proto.RegisterEnum("prometheus.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) - proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) - proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata") - proto.RegisterType((*Sample)(nil), "prometheus.Sample") - proto.RegisterType((*Exemplar)(nil), "prometheus.Exemplar") - proto.RegisterType((*Histogram)(nil), "prometheus.Histogram") - proto.RegisterType((*BucketSpan)(nil), "prometheus.BucketSpan") - proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries") - proto.RegisterType((*Label)(nil), "prometheus.Label") - proto.RegisterType((*Labels)(nil), "prometheus.Labels") - proto.RegisterType((*LabelMatcher)(nil), "prometheus.LabelMatcher") - proto.RegisterType((*ReadHints)(nil), "prometheus.ReadHints") - proto.RegisterType((*Chunk)(nil), "prometheus.Chunk") - proto.RegisterType((*ChunkedSeries)(nil), "prometheus.ChunkedSeries") -} - -func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } - -var fileDescriptor_d938547f84707355 = []byte{ - // 1092 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46, - 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0x59, 0xa3, 0x71, 0x54, 0x02, - 0x69, 0x85, 0xa2, 0x90, 0x11, 0xb7, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x80, 0x5a, 0x12, - 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, - 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0x08, 0xd0, 0x9b, 0xf6, 0x05, 0x8a, 0xc2, 0x57, 0x7d, 0x8c, - 0x62, 0x87, 0xa4, 0x48, 0xc5, 0x29, 0xd0, 0xf4, 0x6e, 0xe7, 
0x9b, 0x6f, 0x76, 0x3e, 0xee, 0xce, - 0xcc, 0x12, 0x6a, 0x72, 0x15, 0x71, 0xd1, 0x89, 0xe2, 0x50, 0x86, 0x04, 0xa2, 0x38, 0xf4, 0xb9, - 0x9c, 0xf3, 0xa5, 0xd8, 0xdd, 0x99, 0x85, 0xb3, 0x10, 0xe1, 0x7d, 0xb5, 0x4a, 0x18, 0xee, 0xcf, - 0x3a, 0x34, 0x7b, 0x5c, 0xc6, 0xde, 0xa4, 0xc7, 0x25, 0x9b, 0x32, 0xc9, 0xc8, 0x53, 0x28, 0xa9, - 0x3d, 0x1c, 0xad, 0xa5, 0xb5, 0x9b, 0x07, 0x8f, 0x3b, 0xf9, 0x1e, 0x9d, 0x4d, 0x66, 0x6a, 0x8e, - 0x56, 0x11, 0xa7, 0x18, 0x42, 0x3e, 0x03, 0xe2, 0x23, 0x36, 0xbe, 0x66, 0xbe, 0xb7, 0x58, 0x8d, - 0x03, 0xe6, 0x73, 0x47, 0x6f, 0x69, 0x6d, 0x8b, 0xda, 0x89, 0xe7, 0x04, 0x1d, 0x7d, 0xe6, 0x73, - 0x42, 0xa0, 0x34, 0xe7, 0x8b, 0xc8, 0x29, 0xa1, 0x1f, 0xd7, 0x0a, 0x5b, 0x06, 0x9e, 0x74, 0xca, - 0x09, 0xa6, 0xd6, 0xee, 0x0a, 0x20, 0xcf, 0x44, 0x6a, 0x50, 0xb9, 0xec, 0x7f, 0xd3, 0x1f, 0x7c, - 0xdb, 0xb7, 0xb7, 0x94, 0x71, 0x3c, 0xb8, 0xec, 0x8f, 0xba, 0xd4, 0xd6, 0x88, 0x05, 0xe5, 0xd3, - 0xc3, 0xcb, 0xd3, 0xae, 0xad, 0x93, 0x06, 0x58, 0x67, 0xe7, 0xc3, 0xd1, 0xe0, 0x94, 0x1e, 0xf6, - 0x6c, 0x83, 0x10, 0x68, 0xa2, 0x27, 0xc7, 0x4a, 0x2a, 0x74, 0x78, 0xd9, 0xeb, 0x1d, 0xd2, 0x97, - 0x76, 0x99, 0x54, 0xa1, 0x74, 0xde, 0x3f, 0x19, 0xd8, 0x26, 0xa9, 0x43, 0x75, 0x38, 0x3a, 0x1c, - 0x75, 0x87, 0xdd, 0x91, 0x5d, 0x71, 0x9f, 0x81, 0x39, 0x64, 0x7e, 0xb4, 0xe0, 0x64, 0x07, 0xca, - 0xaf, 0xd9, 0x62, 0x99, 0x1c, 0x8b, 0x46, 0x13, 0x83, 0x7c, 0x08, 0x96, 0xf4, 0x7c, 0x2e, 0x24, - 0xf3, 0x23, 0xfc, 0x4e, 0x83, 0xe6, 0x80, 0x1b, 0x42, 0xb5, 0x7b, 0xc3, 0xfd, 0x68, 0xc1, 0x62, - 0xb2, 0x0f, 0xe6, 0x82, 0x5d, 0xf1, 0x85, 0x70, 0xb4, 0x96, 0xd1, 0xae, 0x1d, 0x6c, 0x17, 0xcf, - 0xf5, 0x42, 0x79, 0x8e, 0x4a, 0x6f, 0xfe, 0x78, 0xb4, 0x45, 0x53, 0x5a, 0x9e, 0x50, 0xff, 0xc7, - 0x84, 0xc6, 0xdb, 0x09, 0x7f, 0x2d, 0x83, 0x75, 0xe6, 0x09, 0x19, 0xce, 0x62, 0xe6, 0x93, 0x87, - 0x60, 0x4d, 0xc2, 0x65, 0x20, 0xc7, 0x5e, 0x20, 0x51, 0x76, 0xe9, 0x6c, 0x8b, 0x56, 0x11, 0x3a, - 0x0f, 0x24, 0xf9, 0x08, 0x6a, 0x89, 0xfb, 0x7a, 0x11, 0x32, 0x99, 0xa4, 0x39, 0xdb, 0xa2, 0x80, - 0xe0, 0x89, 0xc2, 0x88, 0x0d, 0x86, 0x58, 0xfa, 0x98, 0x47, 0xa3, 0x6a, 0x49, 0x1e, 0x80, 0x29, - 0x26, 0x73, 0xee, 0x33, 0xbc, 0xb5, 0x6d, 0x9a, 0x5a, 0xe4, 0x31, 0x34, 0x7f, 0xe4, 0x71, 0x38, - 0x96, 0xf3, 0x98, 0x8b, 0x79, 0xb8, 0x98, 0xe2, 0x0d, 0x6a, 0xb4, 0xa1, 0xd0, 0x51, 0x06, 0x92, - 0x8f, 0x53, 0x5a, 0xae, 0xcb, 0x44, 0x5d, 0x1a, 0xad, 0x2b, 0xfc, 0x38, 0xd3, 0xf6, 0x29, 0xd8, - 0x05, 0x5e, 0x22, 0xb0, 0x82, 0x02, 0x35, 0xda, 0x5c, 0x33, 0x13, 0x91, 0xc7, 0xd0, 0x0c, 0xf8, - 0x8c, 0x49, 0xef, 0x35, 0x1f, 0x8b, 0x88, 0x05, 0xc2, 0xa9, 0xe2, 0x09, 0x3f, 0x28, 0x9e, 0xf0, - 0xd1, 0x72, 0xf2, 0x8a, 0xcb, 0x61, 0xc4, 0x82, 0xf4, 0x98, 0x1b, 0x59, 0x8c, 0xc2, 0x04, 0xf9, - 0x04, 0xee, 0xad, 0x37, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0xf5, 0xde, - 0xcf, 0x11, 0xdd, 0x20, 0xa2, 0x3a, 0xe1, 0x40, 0xcb, 0x68, 0x6b, 0x39, 0x11, 0xa5, 0x09, 0x25, - 0x2b, 0x0a, 0x85, 0x57, 0x90, 0x55, 0xfb, 0x37, 0xb2, 0xb2, 0x98, 0xb5, 0xac, 0xf5, 0x26, 0xa9, - 0xac, 0x7a, 0x22, 0x2b, 0x83, 0x73, 0x59, 0x6b, 0x62, 0x2a, 0xab, 0x91, 0xc8, 0xca, 0xe0, 0x54, - 0xd6, 0xd7, 0x00, 0x31, 0x17, 0x5c, 0x8e, 0xe7, 0xea, 0xf4, 0x9b, 0xd8, 0xe3, 0x8f, 0x8a, 0x92, - 0xd6, 0xf5, 0xd3, 0xa1, 0x8a, 0x77, 0xe6, 0x05, 0x92, 0x5a, 0x71, 0xb6, 0xdc, 0x2c, 0xc0, 0x7b, - 0x6f, 0x17, 0xe0, 0x17, 0x60, 0xad, 0xa3, 0x36, 0x3b, 0xb5, 0x02, 0xc6, 0xcb, 0xee, 0xd0, 0xd6, - 0x88, 0x09, 0x7a, 0x7f, 0x60, 0xeb, 0x79, 0xb7, 0x1a, 0x47, 0x15, 0x28, 0xa3, 0xe6, 0xa3, 0x3a, - 0x40, 0x7e, 0xed, 0xee, 0x33, 0x80, 0xfc, 0x7c, 0x54, 0xe5, 0x85, 0xd7, 0xd7, 0x82, 
0x27, 0xa5, - 0xbc, 0x4d, 0x53, 0x4b, 0xe1, 0x0b, 0x1e, 0xcc, 0xe4, 0x1c, 0x2b, 0xb8, 0x41, 0x53, 0xcb, 0xfd, - 0x4b, 0x03, 0x18, 0x79, 0x3e, 0x1f, 0xf2, 0xd8, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x00, 0x2a, 0x02, - 0x5b, 0x5f, 0x38, 0x3a, 0x46, 0x90, 0x62, 0x44, 0x32, 0x15, 0xd2, 0x90, 0x8c, 0x48, 0xbe, 0x04, - 0x8b, 0xa7, 0x0d, 0x2f, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, 0x34, 0x48, 0xe3, 0x72, 0x32, - 0xf9, 0x0a, 0x60, 0x9e, 0x1d, 0xbc, 0x70, 0x4a, 0x18, 0x7a, 0xff, 0x9d, 0xd7, 0x92, 0xc6, 0x16, - 0xe8, 0xee, 0x13, 0x28, 0xe3, 0x17, 0xa8, 0xe9, 0x89, 0x13, 0x57, 0x4b, 0xa6, 0xa7, 0x5a, 0x6f, - 0xce, 0x11, 0x2b, 0x9d, 0x23, 0xee, 0x53, 0x30, 0x2f, 0x92, 0xef, 0x7c, 0xdf, 0x83, 0x71, 0x7f, - 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xc9, 0xc6, 0x83, 0xf1, 0xf0, 0x4e, - 0x7c, 0xca, 0xeb, 0x14, 0x1e, 0x8a, 0x4c, 0xa8, 0xfe, 0x2e, 0xa1, 0x46, 0x51, 0x68, 0x1b, 0x4a, - 0x38, 0xf6, 0x4d, 0xd0, 0xbb, 0x2f, 0x92, 0x3a, 0xea, 0x77, 0x5f, 0x24, 0x75, 0x44, 0xd5, 0xa8, - 0x57, 0x00, 0xed, 0xda, 0x86, 0xfb, 0x8b, 0xa6, 0x8a, 0x8f, 0x4d, 0x55, 0xed, 0x09, 0xf2, 0x7f, - 0xa8, 0x08, 0xc9, 0xa3, 0xb1, 0x2f, 0x50, 0x97, 0x41, 0x4d, 0x65, 0xf6, 0x84, 0x4a, 0x7d, 0xbd, - 0x0c, 0x26, 0x59, 0x6a, 0xb5, 0x26, 0x1f, 0x40, 0x55, 0x48, 0x16, 0x4b, 0xc5, 0x4e, 0x86, 0x6a, - 0x05, 0xed, 0x9e, 0x20, 0xf7, 0xc1, 0xe4, 0xc1, 0x74, 0x8c, 0x97, 0xa2, 0x1c, 0x65, 0x1e, 0x4c, - 0x7b, 0x82, 0xec, 0x42, 0x75, 0x16, 0x87, 0xcb, 0xc8, 0x0b, 0x66, 0x4e, 0xb9, 0x65, 0xb4, 0x2d, - 0xba, 0xb6, 0x49, 0x13, 0xf4, 0xab, 0x15, 0x0e, 0xb6, 0x2a, 0xd5, 0xaf, 0x56, 0x6a, 0xf7, 0x98, - 0x05, 0x33, 0xae, 0x36, 0xa9, 0x24, 0xbb, 0xa3, 0xdd, 0x13, 0xee, 0xef, 0x1a, 0x94, 0x8f, 0xe7, - 0xcb, 0xe0, 0x15, 0xd9, 0x83, 0x9a, 0xef, 0x05, 0x63, 0xd5, 0x4a, 0xb9, 0x66, 0xcb, 0xf7, 0x02, - 0x55, 0xc3, 0x3d, 0x81, 0x7e, 0x76, 0xb3, 0xf6, 0xa7, 0x6f, 0x8d, 0xcf, 0x6e, 0x52, 0x7f, 0x27, - 0xbd, 0x04, 0x03, 0x2f, 0x61, 0xb7, 0x78, 0x09, 0x98, 0xa0, 0xd3, 0x0d, 0x26, 0xe1, 0xd4, 0x0b, - 0x66, 0xf9, 0x0d, 0xa8, 0x37, 0x1c, 0xbf, 0xaa, 0x4e, 0x71, 0xed, 0x3e, 0x87, 0x6a, 0xc6, 0xba, - 0xd3, 0xbc, 0xdf, 0x0d, 0xd4, 0x13, 0xbb, 0xf1, 0xae, 0xea, 0xe4, 0x7f, 0x70, 0xef, 0xe4, 0x62, - 0x70, 0x38, 0x1a, 0x17, 0x1e, 0x5b, 0xf7, 0x07, 0x68, 0x60, 0x46, 0x3e, 0xfd, 0xaf, 0xad, 0xb7, - 0x0f, 0xe6, 0x44, 0xed, 0x90, 0x75, 0xde, 0xf6, 0x9d, 0xaf, 0xc9, 0x02, 0x12, 0xda, 0xd1, 0xce, - 0x9b, 0xdb, 0x3d, 0xed, 0xb7, 0xdb, 0x3d, 0xed, 0xcf, 0xdb, 0x3d, 0xed, 0x7b, 0x53, 0xb1, 0xa3, - 0xab, 0x2b, 0x13, 0x7f, 0x71, 0x3e, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x5f, 0xf2, 0x4d, - 0x13, 0x09, 0x00, 0x00, -} - -func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricMetadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Unit) > 0 { - i -= len(m.Unit) - copy(dAtA[i:], m.Unit) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Unit))) - i-- - dAtA[i] = 0x2a - } - if len(m.Help) > 0 { - i -= len(m.Help) - copy(dAtA[i:], m.Help) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Help))) - i-- - dAtA[i] = 0x22 - } - if len(m.MetricFamilyName) > 0 { - i -= len(m.MetricFamilyName) - copy(dAtA[i:], 
m.MetricFamilyName) - i = encodeVarintTypes(dAtA, i, uint64(len(m.MetricFamilyName))) - i-- - dAtA[i] = 0x12 - } - if m.Type != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Sample) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Sample) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Timestamp != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) - i-- - dAtA[i] = 0x10 - } - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func (m *Exemplar) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Timestamp != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) - i-- - dAtA[i] = 0x18 - } - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x11 - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Histogram) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Timestamp != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) - i-- - dAtA[i] = 0x78 - } - if m.ResetHint != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint)) - i-- - dAtA[i] = 0x70 - } - if len(m.PositiveCounts) > 0 { - for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { - f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) - } - i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) - i-- - dAtA[i] = 0x6a - } - if len(m.PositiveDeltas) > 0 { - var j2 int - dAtA4 := make([]byte, len(m.PositiveDeltas)*10) - for _, num := range m.PositiveDeltas { - x3 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x3 >= 1<<7 { - dAtA4[j2] = 
uint8(uint64(x3)&0x7f | 0x80) - j2++ - x3 >>= 7 - } - dAtA4[j2] = uint8(x3) - j2++ - } - i -= j2 - copy(dAtA[i:], dAtA4[:j2]) - i = encodeVarintTypes(dAtA, i, uint64(j2)) - i-- - dAtA[i] = 0x62 - } - if len(m.PositiveSpans) > 0 { - for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - } - if len(m.NegativeCounts) > 0 { - for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { - f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) - } - i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) - i-- - dAtA[i] = 0x52 - } - if len(m.NegativeDeltas) > 0 { - var j6 int - dAtA8 := make([]byte, len(m.NegativeDeltas)*10) - for _, num := range m.NegativeDeltas { - x7 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x7 >= 1<<7 { - dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) - j6++ - x7 >>= 7 - } - dAtA8[j6] = uint8(x7) - j6++ - } - i -= j6 - copy(dAtA[i:], dAtA8[:j6]) - i = encodeVarintTypes(dAtA, i, uint64(j6)) - i-- - dAtA[i] = 0x4a - } - if len(m.NegativeSpans) > 0 { - for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if m.ZeroCount != nil { - { - size := m.ZeroCount.Size() - i -= size - if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.ZeroThreshold != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) - i-- - dAtA[i] = 0x29 - } - if m.Schema != 0 { - i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) - i-- - dAtA[i] = 0x20 - } - if m.Sum != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) - i-- - dAtA[i] = 0x19 - } - if m.Count != nil { - { - size := m.Count.Size() - i -= size - if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintTypes(dAtA, i, uint64(m.CountInt)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} -func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) - i-- - dAtA[i] = 0x11 - return len(dAtA) - i, nil -} -func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt)) - i-- - dAtA[i] = 0x30 - return len(dAtA) - i, nil -} -func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) - i-- - dAtA[i] = 0x39 - return len(dAtA) - i, nil -} -func (m *BucketSpan) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Length != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Length)) - i-- - dAtA[i] = 0x10 - } - if m.Offset != 0 { - i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TimeSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Histograms) > 0 { - for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Samples) > 0 { - for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Label) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Label) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintTypes(dAtA, i, 
uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Labels) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Labels) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Labels) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelMatcher) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x1a - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if m.Type != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ReadHints) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadHints) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.RangeMs != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.RangeMs)) - i-- - dAtA[i] = 0x38 - } - if m.By { - i-- - if m.By { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if len(m.Grouping) > 0 { - for iNdEx := len(m.Grouping) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Grouping[iNdEx]) - copy(dAtA[i:], m.Grouping[iNdEx]) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Grouping[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if m.EndMs != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.EndMs)) - i-- - dAtA[i] = 0x20 - } - if m.StartMs != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.StartMs)) - i-- - dAtA[i] = 0x18 - } - if len(m.Func) > 0 { - i -= len(m.Func) - copy(dAtA[i:], m.Func) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Func))) - i-- - dAtA[i] = 0x12 - } - if m.StepMs != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.StepMs)) - i-- - 
dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Chunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x22 - } - if m.Type != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x18 - } - if m.MaxTimeMs != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.MaxTimeMs)) - i-- - dAtA[i] = 0x10 - } - if m.MinTimeMs != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.MinTimeMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ChunkedSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChunkedSeries) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChunkedSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MetricMetadata) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) - } - l = len(m.MetricFamilyName) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Help) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Unit) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Sample) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != 0 { - n += 9 - } - if m.Timestamp != 0 { - n += 1 + sovTypes(uint64(m.Timestamp)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Exemplar) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.Value != 0 { - n += 9 - } - if m.Timestamp != 0 { - n += 1 + sovTypes(uint64(m.Timestamp)) - } - if m.XXX_unrecognized 
!= nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Histogram) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Count != nil { - n += m.Count.Size() - } - if m.Sum != 0 { - n += 9 - } - if m.Schema != 0 { - n += 1 + sozTypes(uint64(m.Schema)) - } - if m.ZeroThreshold != 0 { - n += 9 - } - if m.ZeroCount != nil { - n += m.ZeroCount.Size() - } - if len(m.NegativeSpans) > 0 { - for _, e := range m.NegativeSpans { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if len(m.NegativeDeltas) > 0 { - l = 0 - for _, e := range m.NegativeDeltas { - l += sozTypes(uint64(e)) - } - n += 1 + sovTypes(uint64(l)) + l - } - if len(m.NegativeCounts) > 0 { - n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 - } - if len(m.PositiveSpans) > 0 { - for _, e := range m.PositiveSpans { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if len(m.PositiveDeltas) > 0 { - l = 0 - for _, e := range m.PositiveDeltas { - l += sozTypes(uint64(e)) - } - n += 1 + sovTypes(uint64(l)) + l - } - if len(m.PositiveCounts) > 0 { - n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 - } - if m.ResetHint != 0 { - n += 1 + sovTypes(uint64(m.ResetHint)) - } - if m.Timestamp != 0 { - n += 1 + sovTypes(uint64(m.Timestamp)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Histogram_CountInt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovTypes(uint64(m.CountInt)) - return n -} -func (m *Histogram_CountFloat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *Histogram_ZeroCountInt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovTypes(uint64(m.ZeroCountInt)) - return n -} -func (m *Histogram_ZeroCountFloat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *BucketSpan) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Offset != 0 { - n += 1 + sozTypes(uint64(m.Offset)) - } - if m.Length != 0 { - n += 1 + sovTypes(uint64(m.Length)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TimeSeries) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if len(m.Samples) > 0 { - for _, e := range m.Samples { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if len(m.Exemplars) > 0 { - for _, e := range m.Exemplars { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if len(m.Histograms) > 0 { - for _, e := range m.Histograms { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Label) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Labels) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LabelMatcher) Size() (n int) { - if m == nil 
{ - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadHints) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StepMs != 0 { - n += 1 + sovTypes(uint64(m.StepMs)) - } - l = len(m.Func) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.StartMs != 0 { - n += 1 + sovTypes(uint64(m.StartMs)) - } - if m.EndMs != 0 { - n += 1 + sovTypes(uint64(m.EndMs)) - } - if len(m.Grouping) > 0 { - for _, s := range m.Grouping { - l = len(s) - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.By { - n += 2 - } - if m.RangeMs != 0 { - n += 1 + sovTypes(uint64(m.RangeMs)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Chunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MinTimeMs != 0 { - n += 1 + sovTypes(uint64(m.MinTimeMs)) - } - if m.MaxTimeMs != 0 { - n += 1 + sovTypes(uint64(m.MaxTimeMs)) - } - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ChunkedSeries) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if len(m.Chunks) > 0 { - for _, e := range m.Chunks { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MetricMetadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= MetricMetadata_MetricType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricFamilyName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricFamilyName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Help = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Unit = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Sample) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Sample: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Exemplar) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, Label{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - case 3: - if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Histogram) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Histogram: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Count = &Histogram_CountInt{v} - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Sum = float64(math.Float64frombits(v)) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) - m.Schema = v - case 5: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.ZeroThreshold = float64(math.Float64frombits(v)) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - m.ZeroCount = &Histogram_ZeroCountInt{v} - case 7: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NegativeSpans = append(m.NegativeSpans, BucketSpan{}) - if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) - m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.NegativeDeltas) == 0 { - m.NegativeDeltas = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) - m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) - } - case 10: - if wireType == 1 { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.NegativeCounts = append(m.NegativeCounts, v2) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
var elementCount int - elementCount = packedLen / 8 - if elementCount != 0 && len(m.NegativeCounts) == 0 { - m.NegativeCounts = make([]float64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.NegativeCounts = append(m.NegativeCounts, v2) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PositiveSpans = append(m.PositiveSpans, BucketSpan{}) - if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) - m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.PositiveDeltas) == 0 { - m.PositiveDeltas = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) - m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) - } - case 13: - if wireType == 1 { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.PositiveCounts = append(m.PositiveCounts, v2) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + packedLen - 
if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - elementCount = packedLen / 8 - if elementCount != 0 && len(m.PositiveCounts) == 0 { - m.PositiveCounts = make([]float64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.PositiveCounts = append(m.PositiveCounts, v2) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) - } - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) - } - m.ResetHint = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BucketSpan) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) - m.Offset = v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) - } - m.Length = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Length |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - 
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TimeSeries) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, Label{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Samples = append(m.Samples, Sample{}) - if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Exemplars = append(m.Exemplars, Exemplar{}) - if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - m.Histograms = append(m.Histograms, Histogram{}) - if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Label) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Label: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Labels) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Labels: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Labels: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, Label{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelMatcher) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= LabelMatcher_Type(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadHints) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadHints: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadHints: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StepMs", wireType) - } - m.StepMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StepMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Func", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Func = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartMs", wireType) - } - m.StartMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndMs", wireType) - } - m.EndMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Grouping", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Grouping = append(m.Grouping, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field By", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - m.By = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeMs", wireType) - } - m.RangeMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RangeMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Chunk) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Chunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinTimeMs", wireType) - } - m.MinTimeMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinTimeMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxTimeMs", wireType) - } - m.MaxTimeMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxTimeMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= Chunk_Encoding(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChunkedSeries) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChunkedSeries: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChunkedSeries: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, Label{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chunks = append(m.Chunks, Chunk{}) - if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTypes(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTypes - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.proto b/vendor/github.com/prometheus/prometheus/prompb/types.proto deleted file mode 100644 index 61fc1e01..00000000 --- a/vendor/github.com/prometheus/prometheus/prompb/types.proto +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2017 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; -package prometheus; - -option go_package = "prompb"; - -import "gogoproto/gogo.proto"; - -message MetricMetadata { - enum MetricType { - UNKNOWN = 0; - COUNTER = 1; - GAUGE = 2; - HISTOGRAM = 3; - GAUGEHISTOGRAM = 4; - SUMMARY = 5; - INFO = 6; - STATESET = 7; - } - - // Represents the metric type, these match the set from Prometheus. - // Refer to github.com/prometheus/common/model/metadata.go for details. - MetricType type = 1; - string metric_family_name = 2; - string help = 4; - string unit = 5; -} - -message Sample { - double value = 1; - // timestamp is in ms format, see model/timestamp/timestamp.go for - // conversion from time.Time to Prometheus timestamp. - int64 timestamp = 2; -} - -message Exemplar { - // Optional, can be empty. - repeated Label labels = 1 [(gogoproto.nullable) = false]; - double value = 2; - // timestamp is in ms format, see model/timestamp/timestamp.go for - // conversion from time.Time to Prometheus timestamp. 
- int64 timestamp = 3; -} - -// A native histogram, also known as a sparse histogram. -// Original design doc: -// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit -// The appendix of this design doc also explains the concept of float -// histograms. This Histogram message can represent both, the usual -// integer histogram as well as a float histogram. -message Histogram { - enum ResetHint { - UNKNOWN = 0; // Need to test for a counter reset explicitly. - YES = 1; // This is the 1st histogram after a counter reset. - NO = 2; // There was no counter reset between this and the previous Histogram. - GAUGE = 3; // This is a gauge histogram where counter resets don't happen. - } - - oneof count { // Count of observations in the histogram. - uint64 count_int = 1; - double count_float = 2; - } - double sum = 3; // Sum of observations in the histogram. - // The schema defines the bucket schema. Currently, valid numbers - // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 - // is a bucket boundary in each case, and then each power of two is - // divided into 2^n logarithmic buckets. Or in other words, each - // bucket boundary is the previous boundary times 2^(2^-n). In the - // future, more bucket schemas may be added using numbers < -4 or > - // 8. - sint32 schema = 4; - double zero_threshold = 5; // Breadth of the zero bucket. - oneof zero_count { // Count in zero bucket. - uint64 zero_count_int = 6; - double zero_count_float = 7; - } - - // Negative Buckets. - repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false]; - // Use either "negative_deltas" or "negative_counts", the former for - // regular histograms with integer counts, the latter for float - // histograms. - repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). - repeated double negative_counts = 10; // Absolute count of each bucket. - - // Positive Buckets. - repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false]; - // Use either "positive_deltas" or "positive_counts", the former for - // regular histograms with integer counts, the latter for float - // histograms. - repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). - repeated double positive_counts = 13; // Absolute count of each bucket. - - ResetHint reset_hint = 14; - // timestamp is in ms format, see model/timestamp/timestamp.go for - // conversion from time.Time to Prometheus timestamp. - int64 timestamp = 15; -} - -// A BucketSpan defines a number of consecutive buckets with their -// offset. Logically, it would be more straightforward to include the -// bucket counts in the Span. However, the protobuf representation is -// more compact in the way the data is structured here (with all the -// buckets in a single array separate from the Spans). -message BucketSpan { - sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). - uint32 length = 2; // Length of consecutive buckets. -} - -// TimeSeries represents samples and labels for a single time series. -message TimeSeries { - // For a timeseries to be valid, and for the samples and exemplars - // to be ingested by the remote system properly, the labels field is required. 
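For illustration only (not part of this patch): the bucket layout described in the Histogram comment above can be computed directly. For schema n, each bucket boundary is the previous boundary times 2^(2^-n), and 1 is always a boundary. A minimal Go sketch, assuming nothing beyond the standard library:

package main

import (
        "fmt"
        "math"
)

func main() {
        const schema = 3 // valid schemas are -4 through 8, per the comment above
        // Growth factor between consecutive bucket boundaries: 2^(2^-schema).
        factor := math.Pow(2, math.Pow(2, -float64(schema)))
        boundary := 1.0 // 1 is a bucket boundary for every schema
        for i := 0; i < 4; i++ {
                next := boundary * factor
                fmt.Printf("bucket %d: (%.6g, %.6g]\n", i, boundary, next)
                boundary = next
        }
}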
- repeated Label labels = 1 [(gogoproto.nullable) = false]; - repeated Sample samples = 2 [(gogoproto.nullable) = false]; - repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; - repeated Histogram histograms = 4 [(gogoproto.nullable) = false]; -} - -message Label { - string name = 1; - string value = 2; -} - -message Labels { - repeated Label labels = 1 [(gogoproto.nullable) = false]; -} - -// Matcher specifies a rule, which can match or set of labels or not. -message LabelMatcher { - enum Type { - EQ = 0; - NEQ = 1; - RE = 2; - NRE = 3; - } - Type type = 1; - string name = 2; - string value = 3; -} - -message ReadHints { - int64 step_ms = 1; // Query step size in milliseconds. - string func = 2; // String representation of surrounding function or aggregation. - int64 start_ms = 3; // Start time in milliseconds. - int64 end_ms = 4; // End time in milliseconds. - repeated string grouping = 5; // List of label names used in aggregation. - bool by = 6; // Indicate whether it is without or by. - int64 range_ms = 7; // Range vector selector range in milliseconds. -} - -// Chunk represents a TSDB chunk. -// Time range [min, max] is inclusive. -message Chunk { - int64 min_time_ms = 1; - int64 max_time_ms = 2; - - // We require this to match chunkenc.Encoding. - enum Encoding { - UNKNOWN = 0; - XOR = 1; - HISTOGRAM = 2; - FLOAT_HISTOGRAM = 3; - } - Encoding type = 3; - bytes data = 4; -} - -// ChunkedSeries represents single, encoded time series. -message ChunkedSeries { - // Labels should be sorted. - repeated Label labels = 1 [(gogoproto.nullable) = false]; - // Chunks will be in start time order and may overlap. - repeated Chunk chunks = 2 [(gogoproto.nullable) = false]; -} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go b/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go deleted file mode 100644 index ff230c44..00000000 --- a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -// multiError type allows combining multiple errors into one. -type multiError []error - -// NewMulti returns multiError with provided errors added if not nil. -func NewMulti(errs ...error) multiError { //nolint:revive // unexported-return. - m := multiError{} - m.Add(errs...) - return m -} - -// Add adds single or many errors to the error list. Each error is added only if not nil. -// If the error is a nonNilMultiError type, the errors inside nonNilMultiError are added to the main multiError. -func (es *multiError) Add(errs ...error) { - for _, err := range errs { - if err == nil { - continue - } - var merr nonNilMultiError - if errors.As(err, &merr) { - *es = append(*es, merr.errs...) - continue - } - *es = append(*es, err) - } -} - -// Err returns the error list as an error or nil if it is empty. 
-func (es multiError) Err() error { - if len(es) == 0 { - return nil - } - return nonNilMultiError{errs: es} -} - -// nonNilMultiError implements the error interface, and it represents -// multiError with at least one error inside it. -// This type is needed to make sure that nil is returned when no error is combined in multiError for err != nil -// check to work. -type nonNilMultiError struct { - errs multiError -} - -// Error returns a concatenated string of the contained errors. -func (es nonNilMultiError) Error() string { - var buf bytes.Buffer - - if len(es.errs) > 1 { - fmt.Fprintf(&buf, "%d errors: ", len(es.errs)) - } - - for i, err := range es.errs { - if i != 0 { - buf.WriteString("; ") - } - buf.WriteString(err.Error()) - } - - return buf.String() -} - -// Is attempts to match the provided error against errors in the error list. -// -// This function allows errors.Is to traverse the values stored in the MultiError. -// It returns true if any of the errors in the list match the target. -func (es nonNilMultiError) Is(target error) bool { - for _, err := range es.errs { - if errors.Is(err, target) { - return true - } - } - return false -} - -// CloseAll closes all given closers while recording error in MultiError. -func CloseAll(cs []io.Closer) error { - errs := NewMulti() - for _, c := range cs { - errs.Add(c.Close()) - } - return errs.Err() -} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/crypto/LICENSE +++ b/vendor/golang.org/x/crypto/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 00000000..2c033dff --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
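For illustration only (not part of this patch): a usage sketch for the multi-error helpers removed above. It assumes only the import path shown in the diff header (github.com/prometheus/prometheus/tsdb/errors) and the NewMulti, Add and Err calls defined there.

package main

import (
        "errors"
        "fmt"

        tsdberrors "github.com/prometheus/prometheus/tsdb/errors"
)

func main() {
        merr := tsdberrors.NewMulti()
        merr.Add(nil)                          // nil errors are skipped
        merr.Add(errors.New("first failure"))  // collected
        merr.Add(errors.New("second failure")) // collected

        // Err returns nil when nothing was collected, so the usual err != nil check works.
        if err := merr.Err(); err != nil {
                fmt.Println(err) // "2 errors: first failure; second failure"
        }
}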
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 00000000..fbf1934a --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. +func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 00000000..46ceac34 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,515 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. 
+package slices + +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[S ~[]E, E comparable](s1, s2 S) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using an equality +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmpCompare(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[S ~[]E, E comparable](s S, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). 
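For illustration only (not part of this patch): a short usage sketch for the comparison and search helpers added above (Equal, Index, Contains, Compare), using the golang.org/x/exp/slices import path from the diff header.

package main

import (
        "fmt"

        "golang.org/x/exp/slices"
)

func main() {
        a := []int{1, 2, 3}
        b := []int{1, 2, 4}

        fmt.Println(slices.Equal(a, b))    // false: first mismatch at index 2
        fmt.Println(slices.Index(a, 3))    // 2
        fmt.Println(slices.Contains(b, 4)) // true
        fmt.Println(slices.Compare(a, b))  // -1, because 3 < 4 at the first mismatch
}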
+func Insert[S ~[]E, E any](s S, i int, v ...E) S { + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) + copy(s2[i:], v) + copy(s2[i+m:], s[i:]) + return s2 + } + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. + // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s +} + +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j:len(s)] // bounds check + + if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s +} + +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// DeleteFunc zeroes the elements between the new length and the original length. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. 
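For illustration only (not part of this patch): the insertion and deletion semantics documented above, shown end to end.

package main

import (
        "fmt"

        "golang.org/x/exp/slices"
)

func main() {
        s := []string{"a", "b", "e"}

        s = slices.Insert(s, 2, "c", "d") // [a b c d e]; s[2] == "c" afterwards
        fmt.Println(s)

        s = slices.Delete(s, 1, 3) // removes s[1:3] -> [a d e]
        fmt.Println(s)

        s = slices.DeleteFunc(s, func(v string) bool { return v == "d" }) // [a e]
        fmt.Println(s)
}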
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) // See Insert + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 + } + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. + copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. +// Compact zeroes the elements between the new length and the original length. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. 
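For illustration only (not part of this patch): Replace, Clone and Compact as documented above.

package main

import (
        "fmt"

        "golang.org/x/exp/slices"
)

func main() {
        s := []int{1, 1, 2, 2, 2, 3}

        c := slices.Compact(slices.Clone(s)) // clone first so s is untouched
        fmt.Println(c)                       // [1 2 3], like the Unix uniq command on sorted input

        s = slices.Replace(s, 1, 5, 9) // replace s[1:5] with a single element
        fmt.Println(s)                 // [1 9 3]
}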
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. +func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? 
+ panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 00000000..f58bbc7b --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,197 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. +// +// SortFunc requires that cmp is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +// To indicate 'uncomparable', return 0 from the function. +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + n := len(x) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { + for i := len(x) - 1; i > 0; i-- { + if cmpLess(x[i], x[i-1]) { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. +func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { + for i := len(x) - 1; i > 0; i-- { + if cmp(x[i], x[i-1]) < 0 { + return false + } + } + return true +} + +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). 
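For illustration only (not part of this patch): the sorting entry points added above. SortFunc takes a three-way cmp function and is not guaranteed to be stable, exactly as its doc comment states.

package main

import (
        "fmt"

        "golang.org/x/exp/slices"
)

type user struct {
        name string
        age  int
}

func main() {
        xs := []int{3, 1, 2}
        slices.Sort(xs)
        fmt.Println(xs, slices.IsSorted(xs), slices.Min(xs)) // [1 2 3] true 1

        us := []user{{"carol", 30}, {"alice", 25}, {"bob", 25}}
        slices.SortFunc(us, func(a, b user) int { return a.age - b.age })
        fmt.Println(us) // sorted by age; the two 25-year-olds may appear in either order
}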
+func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmpLess(x[h], target) { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) +} + +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. 
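For illustration only (not part of this patch): BinarySearch reports both the position and whether the target is present, as described above.

package main

import (
        "fmt"

        "golang.org/x/exp/slices"
)

func main() {
        s := []int{10, 20, 30, 40} // must already be sorted in increasing order

        i, found := slices.BinarySearch(s, 30)
        fmt.Println(i, found) // 2 true

        i, found = slices.BinarySearch(s, 25)
        fmt.Println(i, found) // 2 false: 25 would be inserted at index 2
}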
+func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go new file mode 100644 index 00000000..06f2c7a2 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownCmpFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { + child++ + } + if !(cmp(data[first+root], data[first+child]) < 0) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownCmpFunc(data, i, hi, first, cmp) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownCmpFunc(data, lo, i, first, cmp) + } +} + +// pdqsortCmpFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortCmpFunc(data, a, b, cmp) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortCmpFunc(data, a, b, cmp) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsCmpFunc(data, a, b, cmp) + limit-- + } + + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) + if hint == decreasingHint { + reverseRangeCmpFunc(data, a, b, cmp) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. 
+ if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortCmpFunc(data, a, b, cmp) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) + a = mid + continue + } + + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortCmpFunc(data, a, mid, limit, cmp) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortCmpFunc(data, mid+1, b, limit, cmp) + b = mid + } + } +} + +// partitionCmpFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. +// On return, data[newpivot] = p +func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && (cmp(data[i], data[a]) < 0) { + i++ + } + for i <= j && !(cmp(data[j], data[a]) < 0) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && (cmp(data[i], data[a]) < 0) { + i++ + } + for i <= j && !(cmp(data[j], data[a]) < 0) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !(cmp(data[a], data[i]) < 0) { + i++ + } + for i <= j && (cmp(data[a], data[j]) < 0) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !(cmp(data[i], data[i-1]) < 0) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !(cmp(data[j], data[j-1]) < 0) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !(cmp(data[j], data[j-1]) < 0) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotCmpFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. 
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) + } + // Find the median among i, j, k and stores it into j. + j = medianCmpFunc(data, i, j, k, &swaps, cmp) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { + *swaps++ + return b, a + } + return a, b +} + +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) + return b +} + +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) +} + +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortCmpFunc(data, a, b, cmp) + a = b + b += blockSize + } + insertionSortCmpFunc(data, a, n, cmp) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeCmpFunc(data, a, a+blockSize, b, cmp) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeCmpFunc(data, a, m, n, cmp) + } + blockSize *= 2 + } +} + +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
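For illustration only (not part of this patch): the insertion-sort plus SymMerge machinery above is what backs SortStableFunc; a small sketch of the stability guarantee it provides. strings.Compare is used only as a convenient three-way comparison.

package main

import (
        "fmt"
        "strings"

        "golang.org/x/exp/slices"
)

type kv struct {
        k string
        v int
}

func main() {
        s := []kv{{"b", 1}, {"a", 2}, {"b", 3}, {"a", 4}}

        // Equal keys keep their original relative order.
        slices.SortStableFunc(s, func(x, y kv) int { return strings.Compare(x.k, y.k) })
        fmt.Println(s) // [{a 2} {a 4} {b 1} {b 3}]
}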
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmp(data[h], data[a]) < 0 { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(cmp(data[m], data[h]) < 0) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(cmp(data[p-c], data[c]) < 0) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateCmpFunc(data, start, m, end, cmp) + } + if a < start && start < mid { + symMergeCmpFunc(data, a, start, mid, cmp) + } + if mid < end && end < b { + symMergeCmpFunc(data, mid, end, b, cmp) + } +} + +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeCmpFunc(data, m-i, m, j, cmp) + i -= j + } else { + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) + j -= i + } + } + // i == j + swapRangeCmpFunc(data, m-i, m, i, cmp) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 00000000..99b47c39 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { + child++ + } + if !cmpLess(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !cmpLess(data[a-1], data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. +// On return, data[newpivot] = p +func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && cmpLess(data[i], data[a]) { + i++ + } + for i <= j && !cmpLess(data[j], data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && cmpLess(data[i], data[a]) { + i++ + } + for i <= j && !cmpLess(data[j], data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !cmpLess(data[a], data[i]) { + i++ + } + for i <= j && cmpLess(data[a], data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !cmpLess(data[i], data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !cmpLess(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !cmpLess(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsOrdered scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotOrdered chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. 
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if cmpLess(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmpLess(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !cmpLess(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !cmpLess(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/net/LICENSE +++ b/vendor/golang.org/x/net/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/sync/LICENSE +++ b/vendor/golang.org/x/sync/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/sys/LICENSE +++ b/vendor/golang.org/x/sys/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 4ed2e488..d07dd09e 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -58,6 +58,7 @@ includes_Darwin=' #define _DARWIN_USE_64_BIT_INODE #define __APPLE_USE_RFC_3542 #include +#include #include #include #include diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 4cc7b005..2d15200a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } +//sys renamexNp(from string, to string, flag uint32) (err error) + +func RenamexNp(from string, to string, flag uint32) (err error) { + return renamexNp(from, to, flag) +} + +//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) + +func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + return renameatxNp(fromfd, from, tofd, to, flag) +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5682e262..3f1d3d4c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -2592,3 +2592,4 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) +//sys Mseal(b []byte, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index b25343c7..b86ded54 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -293,6 +293,7 @@ func 
Uname(uname *Utsname) error { //sys Mkfifoat(dirfd int, path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e40fa852..4308ac17 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1169,6 +1169,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index bb02aa6c..c8068a7a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1169,6 +1169,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 877a62b4..01a70b24 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -457,6 +457,7 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd + BCACHEFS_SUPER_MAGIC = 0xca451a4e BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -928,6 +929,7 @@ const ( EPOLL_CTL_ADD = 0x1 EPOLL_CTL_DEL = 0x2 EPOLL_CTL_MOD = 0x3 + EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 ESP_V4_FLOW = 0xa ESP_V6_FLOW = 0xc @@ -941,9 +943,6 @@ const ( ETHTOOL_FEC_OFF = 0x4 ETHTOOL_FEC_RS = 0x8 ETHTOOL_FLAG_ALL = 0x7 - ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 - ETHTOOL_FLAG_OMIT_REPLY = 0x2 - ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_FLASHDEV = 0x33 ETHTOOL_FLASH_MAX_FILENAME = 0x80 ETHTOOL_FWVERS_LEN = 0x20 @@ -1705,6 +1704,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_CRASH_HOTPLUG_SUPPORT = 0x8 KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 @@ -1780,6 +1780,7 @@ const ( KEY_SPEC_USER_KEYRING = -0x4 KEY_SPEC_USER_SESSION_KEYRING = -0x5 LANDLOCK_ACCESS_FS_EXECUTE = 0x1 + LANDLOCK_ACCESS_FS_IOCTL_DEV = 0x8000 LANDLOCK_ACCESS_FS_MAKE_BLOCK = 0x800 LANDLOCK_ACCESS_FS_MAKE_CHAR = 0x40 LANDLOCK_ACCESS_FS_MAKE_DIR = 0x80 @@ -1861,6 +1862,19 @@ const ( MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 + MAP_HUGE_16GB = 0x88000000 + MAP_HUGE_16KB = 0x38000000 + MAP_HUGE_16MB = 0x60000000 + MAP_HUGE_1GB = 0x78000000 + MAP_HUGE_1MB = 0x50000000 + MAP_HUGE_256MB = 0x70000000 + MAP_HUGE_2GB = 0x7c000000 + MAP_HUGE_2MB = 0x54000000 + MAP_HUGE_32MB = 0x64000000 + MAP_HUGE_512KB = 0x4c000000 + MAP_HUGE_512MB = 0x74000000 + MAP_HUGE_64KB = 0x40000000 + MAP_HUGE_8MB = 0x5c000000 MAP_HUGE_MASK = 0x3f MAP_HUGE_SHIFT = 0x1a MAP_PRIVATE = 0x2 @@ -2498,6 +2512,23 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d 
PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PPC_DEXCR_CTRL_CLEAR = 0x4 + PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 + PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 + PR_PPC_DEXCR_CTRL_MASK = 0x1f + PR_PPC_DEXCR_CTRL_SET = 0x2 + PR_PPC_DEXCR_CTRL_SET_ONEXEC = 0x8 + PR_PPC_DEXCR_IBRTPD = 0x1 + PR_PPC_DEXCR_NPHIE = 0x3 + PR_PPC_DEXCR_SBHE = 0x0 + PR_PPC_DEXCR_SRAPD = 0x2 + PR_PPC_GET_DEXCR = 0x48 + PR_PPC_SET_DEXCR = 0x49 + PR_RISCV_CTX_SW_FENCEI_OFF = 0x1 + PR_RISCV_CTX_SW_FENCEI_ON = 0x0 + PR_RISCV_SCOPE_PER_PROCESS = 0x0 + PR_RISCV_SCOPE_PER_THREAD = 0x1 + PR_RISCV_SET_ICACHE_FLUSH_CTX = 0x47 PR_RISCV_V_GET_CONTROL = 0x46 PR_RISCV_V_SET_CONTROL = 0x45 PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 @@ -3192,6 +3223,7 @@ const ( STATX_MTIME = 0x40 STATX_NLINK = 0x4 STATX_SIZE = 0x200 + STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index e4bc0bd5..684a5168 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 689317af..61d74b59 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 5cca668a..a28c9e3e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 14270508..ab5d1fe8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 ESR_MAGIC = 0x45535201 EXTPROC = 0x10000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 28e39afd..c523090e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index cd66e92c..01e6ea78 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 
0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c1595eba..7aa610b1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ee9456b0..92af771b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8cfca81e..b27ef5e6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 60b0deb3..237a2cef 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f90aa728..4a5c555a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ba9e0150..a02fb49a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 07cdfd6e..e26a7c61 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 2f1dd214..c48f7c21 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f40519d9..ad4b9aac 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -82,6 +82,8 @@ const ( EFD_CLOEXEC = 0x400000 EFD_NONBLOCK = 0x4000 EMT_TAGOVF = 0x1 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x400000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 07642c30..b622533e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 923e08cb..cfe6646b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -223,6 +223,16 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, 
$libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 7d73dda6..13f624f6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 05770011..fe222b75 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -223,6 +223,16 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 87d8612a..1bc1a5ad 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2229,3 +2229,19 @@ func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mseal(b []byte, flags uint) (err error) { + var _p0 unsafe.Pointer 
+ if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSEAL, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9dc42410..1851df14 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 41b56173..0b43c693 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d3a0751..e1ec0dbe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s 
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 4019a656..880c6d6e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index c39f7776..7c8452a6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index ac4af24f..b8ef95b0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 57571d07..2ffdf861 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + 
if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index f77d5321..2af3b5c7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index e62963e6..1da08d52 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index fae140b6..b7a25135 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 00831354..6e85b0aa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1493,6 +1493,30 @@ var 
libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 9d1e0ff0..f15dadf0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -555,6 +555,12 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mount(SB) + RET +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_nanosleep(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 79029ed5..28b487df 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index da115f9a..1e7f321e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 53aef5dc..524b0820 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -457,4 +457,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 71d52476..d3e38f68 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -379,4 +379,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c7477061..70b35bf3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -421,4 +421,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index f96e214f..6c778c23 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -324,4 +324,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 28425346..37281cf5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -318,4 +318,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index d0953018..7e567f1e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 295c7f4b..38ae55e5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index d1a9eaca..55e92e60 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index bec157c3..60658d6a 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 7ee7bdc4..e203e8a7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -448,4 +448,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index fad1f25b..5944b97d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 7d3e1635..c66d416d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 0ed53ad9..9889f6a5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -325,4 +325,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 2fba04ad..01d86825 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -386,4 +386,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 621d00d7..7b703e77 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -399,4 +399,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 4740b834..7f1961b9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -110,7 +110,8 @@ type Statx_t struct { Mnt_id uint64 Dio_mem_align uint32 Dio_offset_align uint32 - _ [12]uint64 + Subvol uint64 + _ [11]uint64 } type Fsid struct { @@ -3473,7 +3474,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x6 ) type FsverityDigest struct { @@ -3806,6 +3807,9 @@ const ( ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 + ETHTOOL_FLAG_OMIT_REPLY = 0x2 + ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_A_HEADER_UNSPEC = 0x0 
ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3975,7 +3979,7 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x5 + ETHTOOL_A_TSINFO_MAX = 0x6 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 97651b5b..b6e1ab76 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -1179,7 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW -//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) = advapi32.GetAce // Control returns the security descriptor control bits. func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6525c62f..1fa34fd1 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -17,8 +17,10 @@ import ( "unsafe" ) -type Handle uintptr -type HWND uintptr +type ( + Handle uintptr + HWND uintptr +) const ( InvalidHandle = ^Handle(0) @@ -211,6 +213,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId +//sys LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) [failretval==0] = user32.LoadKeyboardLayoutW +//sys UnloadKeyboardLayout(hkl Handle) (err error) = user32.UnloadKeyboardLayout +//sys GetKeyboardLayout(tid uint32) (hkl Handle) = user32.GetKeyboardLayout +//sys ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) = user32.ToUnicodeEx //sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow //sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW //sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx @@ -1368,9 +1374,11 @@ func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) } + func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) } + func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } diff --git 
a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index d8cb71db..3f03b3d5 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -2003,7 +2003,21 @@ const ( MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 ) -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 +// Flags for GetAdaptersAddresses, see +// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses. +const ( + GAA_FLAG_SKIP_UNICAST = 0x1 + GAA_FLAG_SKIP_ANYCAST = 0x2 + GAA_FLAG_SKIP_MULTICAST = 0x4 + GAA_FLAG_SKIP_DNS_SERVER = 0x8 + GAA_FLAG_INCLUDE_PREFIX = 0x10 + GAA_FLAG_SKIP_FRIENDLY_NAME = 0x20 + GAA_FLAG_INCLUDE_WINS_INFO = 0x40 + GAA_FLAG_INCLUDE_GATEWAYS = 0x80 + GAA_FLAG_INCLUDE_ALL_INTERFACES = 0x100 + GAA_FLAG_INCLUDE_ALL_COMPARTMENTS = 0x200 + GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER = 0x400 +) const ( IF_TYPE_OTHER = 1 @@ -2017,6 +2031,50 @@ const ( IF_TYPE_IEEE1394 = 144 ) +// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin +const ( + IpPrefixOriginOther = 0 + IpPrefixOriginManual = 1 + IpPrefixOriginWellKnown = 2 + IpPrefixOriginDhcp = 3 + IpPrefixOriginRouterAdvertisement = 4 + IpPrefixOriginUnchanged = 1 << 4 +) + +// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin +const ( + NlsoOther = 0 + NlsoManual = 1 + NlsoWellKnown = 2 + NlsoDhcp = 3 + NlsoLinkLayerAddress = 4 + NlsoRandom = 5 + IpSuffixOriginOther = 0 + IpSuffixOriginManual = 1 + IpSuffixOriginWellKnown = 2 + IpSuffixOriginDhcp = 3 + IpSuffixOriginLinkLayerAddress = 4 + IpSuffixOriginRandom = 5 + IpSuffixOriginUnchanged = 1 << 4 +) + +// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state +const ( + NldsInvalid = 0 + NldsTentative = 1 + NldsDuplicate = 2 + NldsDeprecated = 3 + NldsPreferred = 4 + IpDadStateInvalid = 0 + IpDadStateTentative = 1 + IpDadStateDuplicate = 2 + IpDadStateDeprecated = 3 + IpDadStatePreferred = 4 +) + type SocketAddress struct { Sockaddr *syscall.RawSockaddrAny SockaddrLength int32 @@ -3404,3 +3462,14 @@ type DCB struct { EvtChar byte wReserved1 uint16 } + +// Keyboard Layout Flags. 
+// See https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-loadkeyboardlayoutw +const ( + KLF_ACTIVATE = 0x00000001 + KLF_SUBSTITUTE_OK = 0x00000002 + KLF_REORDER = 0x00000008 + KLF_REPLACELANG = 0x00000010 + KLF_NOTELLSHELL = 0x00000080 + KLF_SETFORPROCESS = 0x00000100 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index eba76101..9bb979a3 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -478,12 +478,16 @@ var ( procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") + procGetKeyboardLayout = moduser32.NewProc("GetKeyboardLayout") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") procIsWindow = moduser32.NewProc("IsWindow") procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procLoadKeyboardLayoutW = moduser32.NewProc("LoadKeyboardLayoutW") procMessageBoxW = moduser32.NewProc("MessageBoxW") + procToUnicodeEx = moduser32.NewProc("ToUnicodeEx") + procUnloadKeyboardLayout = moduser32.NewProc("UnloadKeyboardLayout") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") @@ -789,6 +793,14 @@ func FreeSid(sid *SID) (err error) { return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { + r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetLengthSid(sid *SID) (len uint32) { r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) len = uint32(r0) @@ -1225,14 +1237,6 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE return } -func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) { - r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) - if r0 == 0 { - ret = GetLastError() - } - return -} - func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { @@ -4082,6 +4086,12 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { return } +func GetKeyboardLayout(tid uint32) (hkl Handle) { + r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + hkl = Handle(r0) + return +} + func GetShellWindow() (shellWindow HWND) { r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) shellWindow = HWND(r0) @@ -4115,6 +4125,15 @@ func IsWindowVisible(hwnd HWND) (isVisible bool) { return } +func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + hkl = Handle(r0) + if hkl == 0 { + err = errnoErr(e1) + } + return +} + 
func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) ret = int32(r0) @@ -4124,6 +4143,20 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i return } +func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { + r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + ret = int32(r0) + return +} + +func UnloadKeyboardLayout(hkl Handle) (err error) { + r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { var _p0 uint32 if inheritExisting { diff --git a/vendor/golang.org/x/term/LICENSE b/vendor/golang.org/x/term/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/term/LICENSE +++ b/vendor/golang.org/x/term/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/text/LICENSE +++ b/vendor/golang.org/x/text/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. 
copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/modules.txt b/vendor/modules.txt index b5da768f..360fc802 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -27,6 +27,8 @@ github.com/eclipse/paho.mqtt.golang/packets ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log +# github.com/evanphx/json-patch v5.6.0+incompatible +## explicit # github.com/evanphx/json-patch/v5 v5.9.0 ## explicit; go 1.18 github.com/evanphx/json-patch/v5 @@ -110,8 +112,8 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/pprof v0.0.0-20240528025155-186aa0362fba -## explicit; go 1.19 +# github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 +## explicit; go 1.22 github.com/google/pprof/profile # github.com/google/uuid v1.6.0 ## explicit @@ -127,7 +129,6 @@ github.com/gorilla/mux github.com/gorilla/websocket # github.com/grafana/dskit v0.0.0-20231012002814-3b80e3b2a51c ## explicit; go 1.19 -github.com/grafana/dskit/backoff github.com/grafana/dskit/flagext github.com/grafana/dskit/grpcutil github.com/grafana/dskit/httpgrpc @@ -136,20 +137,11 @@ github.com/grafana/dskit/instrument github.com/grafana/dskit/log github.com/grafana/dskit/middleware github.com/grafana/dskit/modules -github.com/grafana/dskit/multierror -github.com/grafana/dskit/runutil github.com/grafana/dskit/server github.com/grafana/dskit/services github.com/grafana/dskit/signals github.com/grafana/dskit/tracing github.com/grafana/dskit/user -# github.com/grafana/e2e v0.1.1 -## explicit; go 1.18 -github.com/grafana/e2e -# github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc -## explicit; go 1.21 -github.com/grafana/regexp -github.com/grafana/regexp/syntax # github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 ## explicit; go 1.20 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule @@ -191,8 +183,8 @@ github.com/munnerz/goautoneg # github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f ## explicit github.com/mwitkow/go-conntrack -# github.com/onsi/ginkgo/v2 v2.17.2 -## explicit; go 1.20 +# github.com/onsi/ginkgo/v2 v2.20.2 +## explicit; go 1.22 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config github.com/onsi/ginkgo/v2/formatter @@ -213,7 +205,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.33.1 +# github.com/onsi/gomega v1.34.1 ## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format @@ -266,11 +258,6 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.53.0 -## explicit; go 1.21 -github.com/prometheus/prometheus/model/labels -github.com/prometheus/prometheus/prompb -github.com/prometheus/prometheus/tsdb/errors # github.com/rivo/uniseg v0.2.0 ## explicit; go 1.12 github.com/rivo/uniseg @@ -396,14 +383,16 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace 
go.uber.org/zap/zapcore -# golang.org/x/crypto v0.25.0 +# golang.org/x/crypto v0.26.0 ## explicit; go 1.20 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish -# golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 +# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 ## explicit; go 1.20 +golang.org/x/exp/constraints golang.org/x/exp/maps -# golang.org/x/net v0.27.0 +golang.org/x/exp/slices +# golang.org/x/net v0.28.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -424,20 +413,20 @@ golang.org/x/net/trace golang.org/x/oauth2 golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/internal -# golang.org/x/sync v0.7.0 +# golang.org/x/sync v0.8.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.22.0 +# golang.org/x/sys v0.24.0 ## explicit; go 1.18 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.22.0 +# golang.org/x/term v0.23.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.16.0 +# golang.org/x/text v0.17.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -462,7 +451,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.22.0 +# golang.org/x/tools v0.24.0 ## explicit; go 1.19 golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector
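Editor's note (not part of the patch): besides the dependency bookkeeping in modules.txt, this x/sys bump exposes the keyboard-layout wrappers added in the zsyscall_windows.go hunks above (GetKeyboardLayout, LoadKeyboardLayout, ToUnicodeEx, UnloadKeyboardLayout) together with the KLF_* flags. The translateKey helper below is a minimal, hypothetical sketch of how those wrappers fit together, assuming the US English layout KLID "00000409" and an all-zero key-state snapshot; it is illustrative only and not part of the vendored code.

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// translateKey loads a keyboard layout, translates a virtual key plus scan code
// into text with ToUnicodeEx, and unloads the layout again.
func translateKey(vkey, scancode uint32) (string, error) {
	// 0 asks for the layout of the calling thread.
	fmt.Printf("active layout before switch: 0x%x\n", windows.GetKeyboardLayout(0))

	// "00000409" is the KLID for the US English layout; KLF_ACTIVATE makes it
	// the active layout for the calling thread.
	name, err := windows.UTF16PtrFromString("00000409")
	if err != nil {
		return "", err
	}
	hkl, err := windows.LoadKeyboardLayout(name, windows.KLF_ACTIVATE)
	if err != nil {
		return "", err
	}
	defer windows.UnloadKeyboardLayout(hkl)

	var state [256]byte // key-state snapshot; all zero here for simplicity
	var buf [8]uint16   // receives the translated UTF-16 characters
	n := windows.ToUnicodeEx(vkey, scancode, &state[0], &buf[0], int32(len(buf)), 0, hkl)
	if n <= 0 {
		return "", fmt.Errorf("ToUnicodeEx returned %d (dead key or no translation)", n)
	}
	return windows.UTF16ToString(buf[:n]), nil
}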