diff --git a/go.mod b/go.mod
index 45f967115..ad18ae235 100644
--- a/go.mod
+++ b/go.mod
@@ -16,8 +16,8 @@ require (
github.com/openshift/library-go v0.0.0-20240711192904-190fec8c3f09 // release-4.16
github.com/operator-framework/api v0.29.0
github.com/operator-framework/operator-lib v0.17.0
- github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0
- github.com/prometheus/client_golang v1.20.5
+ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1
+ github.com/prometheus/client_golang v1.21.0
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.62.0
github.com/spf13/cobra v1.9.1
@@ -39,7 +39,7 @@ require (
)
require (
- cel.dev/expr v0.20.0 // indirect
+ cel.dev/expr v0.21.2 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -57,7 +57,7 @@ require (
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
- github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
github.com/google/uuid v1.6.0 // indirect
@@ -67,7 +67,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.11 // indirect
+ github.com/klauspost/compress v1.18.0 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -84,7 +84,7 @@ require (
go.uber.org/zap v1.27.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.35.0 // indirect
- golang.org/x/oauth2 v0.26.0 // indirect
+ golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/term v0.29.0 // indirect
diff --git a/go.sum b/go.sum
index be24c168b..fa0046e7a 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI=
-cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+cel.dev/expr v0.21.2 h1:o+Wj235dy4gFYlYin3JsMpp3EEfMrPm/6tdoyjT98S0=
+cel.dev/expr v0.21.2/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -103,8 +103,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
@@ -136,8 +136,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -208,10 +208,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0 h1:ckSycH7xHtpcvXsmEY/qEziRhDQKqKqbsHi9kX/BO7A=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0/go.mod h1:6x4x0t9BP35g4XcjkHE9EB3RxhyfxpdpmZKd/Qyk8+M=
-github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
-github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 h1:DP+PUNVOc+Bkft8a4QunLzaZ0RspWuD3tBbcPHr2PeE=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1/go.mod h1:6x4x0t9BP35g4XcjkHE9EB3RxhyfxpdpmZKd/Qyk8+M=
+github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
+github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
@@ -292,8 +292,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
-golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
index d127d4362..def01a6be 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -19,6 +19,7 @@ const (
tbFunc // func(T) bool
ttbFunc // func(T, T) bool
+ ttiFunc // func(T, T) int
trbFunc // func(T, R) bool
tibFunc // func(T, I) bool
trFunc // func(T) R
@@ -28,11 +29,13 @@ const (
Transformer = trFunc // func(T) R
ValueFilter = ttbFunc // func(T, T) bool
Less = ttbFunc // func(T, T) bool
+ Compare = ttiFunc // func(T, T) int
ValuePredicate = tbFunc // func(T) bool
KeyValuePredicate = trbFunc // func(T, R) bool
)
var boolType = reflect.TypeOf(true)
+var intType = reflect.TypeOf(0)
// IsType reports whether the reflect.Type is of the specified function type.
func IsType(t reflect.Type, ft funcType) bool {
@@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool {
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
return true
}
+ case ttiFunc: // func(T, T) int
+ if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType {
+ return true
+ }
case trbFunc: // func(T, R) bool
if ni == 2 && no == 1 && t.Out(0) == boolType {
return true
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
index 754496f3b..ba3fce81f 100644
--- a/vendor/github.com/google/go-cmp/cmp/options.go
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
if t := s.curPath.Index(-2).Type(); t.Name() != "" {
// Named type with unexported fields.
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
- if _, ok := reflect.New(t).Interface().(error); ok {
+ isProtoMessage := func(t reflect.Type) bool {
+ m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect")
+ return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 &&
+ m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" &&
+ m.Type.Out(0).Name() == "Message"
+ }
+ if isProtoMessage(t) {
+ help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types`
+ } else if _, ok := reflect.New(t).Interface().(error); ok {
help = "consider using cmpopts.EquateErrors to compare error values"
} else if t.Comparable() {
help = "consider using cmpopts.EquateComparable to compare comparable Go types"
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index de264c85a..244ee19c4 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -14,8 +14,34 @@ This package provides various compression algorithms.
[Go build status](https://github.com/klauspost/compress/actions/workflows/go.yml)
[Sourcegraph](https://sourcegraph.com/github.com/klauspost/compress?badge)
+# package usage
+
+Use `go get github.com/klauspost/compress@latest` to add it to your project.
+
+This package supports the current Go version and the two previous versions.
+
+* Use the `nounsafe` tag to disable all use of the "unsafe" package.
+* Use the `noasm` tag to disable all assembly across packages.
+
+Use the links above for more information on each.
+
# changelog
+* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
+ * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
+ * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
+ * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043
+ * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045
+ * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048
+ * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
+ * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
+
+* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
+ * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
+ * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
+ * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011
+ * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013
+
* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
* gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
* gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
@@ -65,9 +91,9 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
- * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
+ * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871
* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
- * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
+ * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867
* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
@@ -124,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
See changes to v1.15.x
* Jan 21st, 2023 (v1.15.15)
- * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
+ * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
@@ -167,7 +193,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
- * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+ * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643
* July 13, 2022 (v1.15.8)
@@ -209,7 +235,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
- * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+ * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590
* May 11, 2022 (v1.15.4)
@@ -236,12 +262,12 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
* Mar 3, 2022 (v1.15.0)
- * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
- * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505)
* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
- * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
- * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
- * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+ * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510)
Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
@@ -258,7 +284,7 @@ While the release has been extensively tested, it is recommended to testing when
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
- * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+ * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
* Feb 17, 2022 (v1.14.3)
* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
@@ -565,12 +591,14 @@ While the release has been extensively tested, it is recommended to testing when
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
-| old import | new import | Documentation
-|--------------------|-----------------------------------------|--------------------|
-| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
-| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
-| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
-| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+Typical speed is about 2x that of the standard library packages.
+
+| old import | new import | Documentation |
+|------------------|---------------------------------------|-------------------------------------------------------------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
+| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
+| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
@@ -625,84 +653,6 @@ This will only use up to 4KB in memory when the writer is idle.
Compression is almost always worse than the fastest compression level
and each write will allocate (a little) memory.
-# Performance Update 2018
-
-It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
-
-The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
-
-The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
-
-The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
-
-
-## Overall differences.
-
-There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
-
-The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library.
-
-This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression.
-
-There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
-
-## Web Content
-
-This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
-
-Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
-
-Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
-
-## Object files
-
-This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
-
-The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression.
-
-The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
-
-## Highly Compressible File
-
-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
-
-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression.
-
-So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
-
-## Medium-High Compressible
-
-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
-
-We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
-
-## Medium Compressible
-
-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
-
-The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
-
-
-## Un-compressible Content
-
-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
-
-
-## Huffman only compression
-
-This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character.
-
-This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
-
-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
-
-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
-
-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
-
-For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-
-This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
# Other packages
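The reworked table above advertises these packages as drop-in replacements; a minimal sketch of the swap for `compress/gzip` (only the import path changes):

```go
package main

import (
	"bytes"
	"fmt"

	gzip "github.com/klauspost/compress/gzip" // was: "compress/gzip"
)

func main() {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	_, _ = w.Write([]byte("hello, hello, hello"))
	_ = w.Close()
	fmt.Println(buf.Len(), "compressed bytes")
}
```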
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index e36d9742f..bfc7a523d 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -6,10 +6,11 @@
package huff0
import (
- "encoding/binary"
"errors"
"fmt"
"io"
+
+ "github.com/klauspost/compress/internal/le"
)
// bitReader reads a bitstream in reverse.
@@ -46,7 +47,7 @@ func (b *bitReaderBytes) init(in []byte) error {
return nil
}
-// peekBitsFast requires that at least one bit is requested every time.
+// peekByteFast requires that at least one byte is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReaderBytes) peekByteFast() uint8 {
got := uint8(b.value >> 56)
@@ -66,8 +67,7 @@ func (b *bitReaderBytes) fillFast() {
}
// 2 bounds checks.
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@@ -76,7 +76,7 @@ func (b *bitReaderBytes) fillFast() {
// fillFastStart() assumes the bitReaderBytes is empty and there are at least 8 bytes to read.
func (b *bitReaderBytes) fillFastStart() {
// Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@@ -86,9 +86,8 @@ func (b *bitReaderBytes) fill() {
if b.bitsRead < 32 {
return
}
- if b.off > 4 {
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ if b.off >= 4 {
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@@ -175,9 +174,7 @@ func (b *bitReaderShifted) fillFast() {
return
}
- // 2 bounds checks.
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4
@@ -185,8 +182,7 @@ func (b *bitReaderShifted) fillFast() {
// fillFastStart() assumes the bitReaderShifted is empty and there are at least 8 bytes to read.
func (b *bitReaderShifted) fillFastStart() {
- // Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@@ -197,8 +193,7 @@ func (b *bitReaderShifted) fill() {
return
}
if b.off > 4 {
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4
diff --git a/vendor/github.com/klauspost/compress/internal/le/le.go b/vendor/github.com/klauspost/compress/internal/le/le.go
new file mode 100644
index 000000000..e54909e16
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/le.go
@@ -0,0 +1,5 @@
+package le
+
+type Indexer interface {
+ int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
+}
diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
new file mode 100644
index 000000000..0cfb5c0e2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
@@ -0,0 +1,42 @@
+//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine
+
+package le
+
+import (
+ "encoding/binary"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+ return b[i]
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+ return binary.LittleEndian.Uint16(b[i:])
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+ return binary.LittleEndian.Uint32(b[i:])
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+ return binary.LittleEndian.Uint64(b[i:])
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+ binary.LittleEndian.PutUint16(b, v)
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+ binary.LittleEndian.PutUint32(b, v)
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+ binary.LittleEndian.PutUint64(b, v)
+}
diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
new file mode 100644
index 000000000..ada45cd90
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
@@ -0,0 +1,55 @@
+// Enabled on 64-bit little-endian platforms:
+
+//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine
+
+package le
+
+import (
+ "unsafe"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+ //return b[i]
+ //return *(*byte)(unsafe.Pointer(&b[i]))
+ return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+ //return binary.LittleEndian.Uint16(b[i:])
+ //return *(*uint16)(unsafe.Pointer(&b[i]))
+ return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+ //return binary.LittleEndian.Uint32(b[i:])
+ //return *(*uint32)(unsafe.Pointer(&b[i]))
+ return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+ //return binary.LittleEndian.Uint64(b[i:])
+ //return *(*uint64)(unsafe.Pointer(&b[i]))
+ return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+ //binary.LittleEndian.PutUint16(b, v)
+ *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+ //binary.LittleEndian.PutUint32(b, v)
+ *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+ //binary.LittleEndian.PutUint64(b, v)
+ *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
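Both build variants expose an identical generic API, so callers such as the zstd bit readers compile unchanged whether `unsafe` is allowed or not. A self-contained sketch of the pattern (`internal/le` itself cannot be imported from outside the module, so the names here are illustrative):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// indexer mirrors le.Indexer: any integer offset type works without conversion.
type indexer interface {
	int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
}

// load32 is the safe variant; the unsafe build swaps in a raw pointer load
// behind the same signature, and the caller guarantees i+4 <= len(b).
func load32[I indexer](b []byte, i I) uint32 {
	return binary.LittleEndian.Uint32(b[i:])
}

func main() {
	buf := []byte{1, 0, 0, 0, 2, 0, 0, 0}
	fmt.Println(load32(buf, int32(0)), load32(buf, uint(4))) // 1 2
}
```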
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
index 5a4412f90..81bda5e29 100644
--- a/vendor/github.com/klauspost/compress/s2sx.mod
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -1,4 +1,3 @@
module github.com/klauspost/compress
-go 1.19
-
+go 1.22
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index 92e2347bb..c11d7fa28 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -6,7 +6,7 @@ A high performance compression algorithm is implemented. For now focused on spee
This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
-This package is pure Go and without use of "unsafe".
+This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features.
The `zstd` package is provided as open source software using a Go standard license.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 25ca98394..d41e3e170 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -5,11 +5,12 @@
package zstd
import (
- "encoding/binary"
"errors"
"fmt"
"io"
"math/bits"
+
+ "github.com/klauspost/compress/internal/le"
)
// bitReader reads a bitstream in reverse.
@@ -18,6 +19,7 @@ import (
type bitReader struct {
in []byte
value uint64 // Maybe use [16]byte, but shifting is awkward.
+ cursor int // offset where next read should end
bitsRead uint8
}
@@ -32,6 +34,7 @@ func (b *bitReader) init(in []byte) error {
if v == 0 {
return errors.New("corrupt stream, did not find end of stream")
}
+ b.cursor = len(in)
b.bitsRead = 64
b.value = 0
if len(in) >= 8 {
@@ -67,18 +70,15 @@ func (b *bitReader) fillFast() {
if b.bitsRead < 32 {
return
}
- v := b.in[len(b.in)-4:]
- b.in = b.in[:len(b.in)-4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
+ b.cursor -= 4
+ b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
}
// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
- v := b.in[len(b.in)-8:]
- b.in = b.in[:len(b.in)-8]
- b.value = binary.LittleEndian.Uint64(v)
+ b.cursor -= 8
+ b.value = le.Load64(b.in, b.cursor)
b.bitsRead = 0
}
@@ -87,25 +87,23 @@ func (b *bitReader) fill() {
if b.bitsRead < 32 {
return
}
- if len(b.in) >= 4 {
- v := b.in[len(b.in)-4:]
- b.in = b.in[:len(b.in)-4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
+ if b.cursor >= 4 {
+ b.cursor -= 4
+ b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
return
}
- b.bitsRead -= uint8(8 * len(b.in))
- for len(b.in) > 0 {
- b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
- b.in = b.in[:len(b.in)-1]
+ b.bitsRead -= uint8(8 * b.cursor)
+ for b.cursor > 0 {
+ b.cursor -= 1
+ b.value = (b.value << 8) | uint64(b.in[b.cursor])
}
}
// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
- return len(b.in) == 0 && b.bitsRead >= 64
+ return b.cursor == 0 && b.bitsRead >= 64
}
// overread returns true if more bits have been requested than is on the stream.
@@ -115,13 +113,14 @@ func (b *bitReader) overread() bool {
// remain returns the number of bits remaining.
func (b *bitReader) remain() uint {
- return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
+ return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
// Release reference.
b.in = nil
+ b.cursor = 0
if !b.finished() {
return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
}
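The `cursor` field replaces the old pattern of shrinking `b.in` on every fill, so the input slice now stays intact for the lifetime of the reader. It also changes the struct layout, which is what the `seqdec_amd64.s` offset changes further down encode. My reading of the 64-bit layout (inferred from the assembly, not stated in the source):

```go
type bitReader struct {
	in       []byte // offset 0: data pointer, 8: len, 16: cap
	value    uint64 // offset 24: MOVQ 24(CX), DX
	cursor   int    // offset 32: MOVQ 32(CX), SI (previously the slice len at 8(CX) was read)
	bitsRead uint8  // offset 40: MOVBQZX 40(CX), BX (previously at 32(CX))
}
```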
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 9c28840c3..0dd742fd2 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -5,14 +5,10 @@
package zstd
import (
- "bytes"
- "encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
- "os"
- "path/filepath"
"sync"
"github.com/klauspost/compress/huff0"
@@ -648,21 +644,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err)
return err
}
- // Extract blocks...
- if false && hist.dict == nil {
- fatalErr := func(err error) {
- if err != nil {
- panic(err)
- }
- }
- fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
- var buf bytes.Buffer
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
- buf.Write(in)
- os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
- }
return nil
}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 32a7f401d..fd35ea148 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -9,6 +9,7 @@ import (
"fmt"
"math"
"math/bits"
+ "slices"
"github.com/klauspost/compress/huff0"
)
@@ -457,16 +458,7 @@ func fuzzFseEncoder(data []byte) int {
// All 0
return 0
}
- maxCount := func(a []uint32) int {
- var max uint32
- for _, v := range a {
- if v > max {
- max = v
- }
- }
- return int(max)
- }
- cnt := maxCount(hist[:maxSym])
+ cnt := int(slices.Max(hist[:maxSym]))
if cnt == len(data) {
// RLE
return 0
@@ -884,15 +876,6 @@ func (b *blockEnc) genCodes() {
}
}
}
- maxCount := func(a []uint32) int {
- var max uint32
- for _, v := range a {
- if v > max {
- max = v
- }
- }
- return int(max)
- }
if debugAsserts && mlMax > maxMatchLengthSymbol {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
}
@@ -903,7 +886,7 @@ func (b *blockEnc) genCodes() {
panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
}
- b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
- b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
- b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+ b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
+ b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
+ b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index bbca17234..ea2a19376 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -123,7 +123,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
}
// Read bytes from the decompressed stream into p.
-// Returns the number of bytes written and any error that occurred.
+// Returns the number of bytes read and any error that occurred.
// When the stream is done, io.EOF will be returned.
func (d *Decoder) Read(p []byte) (int, error) {
var n int
@@ -323,6 +323,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.bBuf = nil
if frame.history.decoders.br != nil {
frame.history.decoders.br.in = nil
+ frame.history.decoders.br.cursor = 0
}
d.decoders <- block
}()
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 5ca46038a..7d250c67f 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -116,7 +116,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(err)
}
if t < 0 {
- err := fmt.Sprintf("s (%d) < 0", s)
+ err := fmt.Sprintf("t (%d) < 0", t)
panic(err)
}
if s-t > e.maxMatchOff {
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
index 57b9c31c0..bea1779e9 100644
--- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
@@ -7,20 +7,25 @@
package zstd
import (
- "encoding/binary"
"math/bits"
+
+ "github.com/klauspost/compress/internal/le"
)
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
- for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
- diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+ left := len(a)
+ for left >= 8 {
+ diff := le.Load64(a, n) ^ le.Load64(b, n)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
+ left -= 8
}
+ a = a[n:]
+ b = b[n:]
for i := range a {
if a[i] != b[i] {
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index d7fe6d82d..9a7de82f9 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
return io.ErrUnexpectedEOF
}
var ll, mo, ml int
- if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+ if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index f5591fa1e..a708ca6d3 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -7,9 +7,9 @@
TEXT ·sequenceDecs_decode_amd64(SB), $8-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -299,8 +299,8 @@ sequenceDecs_decode_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -335,9 +335,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -598,8 +598,8 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -634,9 +634,9 @@ error_overread:
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -884,8 +884,8 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -920,9 +920,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -1141,8 +1141,8 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -1787,9 +1787,9 @@ empty_seqs:
TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -2281,8 +2281,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -2349,9 +2349,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -2801,8 +2801,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -2869,9 +2869,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -3465,8 +3465,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -3533,9 +3533,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -4087,8 +4087,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Update the context
MOVQ ctx+16(FP), AX
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
index 2fb35b788..7cec2197c 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
for i := range seqs {
var ll, mo, ml int
- if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+ if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
index 8014174a7..65045eabd 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -69,7 +69,6 @@ var llBitsTable = [maxLLCode + 1]byte{
func llCode(litLength uint32) uint8 {
const llDeltaCode = 19
if litLength <= 63 {
- // Compiler insists on bounds check (Go 1.12)
return llCodeTable[litLength&63]
}
return uint8(highBit(litLength)) + llDeltaCode
@@ -102,7 +101,6 @@ var mlBitsTable = [maxMLCode + 1]byte{
func mlCode(mlBase uint32) uint8 {
const mlDeltaCode = 36
if mlBase <= 127 {
- // Compiler insists on bounds check (Go 1.12)
return mlCodeTable[mlBase&127]
}
return uint8(highBit(mlBase)) + mlDeltaCode
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
index ec13594e8..a17381b8f 100644
--- a/vendor/github.com/klauspost/compress/zstd/snappy.go
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -197,7 +197,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
n, r.err = w.Write(r.block.output)
if r.err != nil {
- return written, err
+ return written, r.err
}
written += int64(n)
continue
@@ -239,7 +239,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
}
n, r.err = w.Write(r.block.output)
if r.err != nil {
- return written, err
+ return written, r.err
}
written += int64(n)
continue
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 066bef2a4..6252b46ae 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -5,10 +5,11 @@ package zstd
import (
"bytes"
- "encoding/binary"
"errors"
"log"
"math"
+
+ "github.com/klauspost/compress/internal/le"
)
// enable debug printing
@@ -110,11 +111,11 @@ func printf(format string, a ...interface{}) {
}
func load3232(b []byte, i int32) uint32 {
- return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
+ return le.Load32(b, i)
}
func load6432(b []byte, i int32) uint64 {
- return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
+ return le.Load64(b, i)
}
type byter interface {
diff --git a/vendor/github.com/prometheus/client_golang/api/client.go b/vendor/github.com/prometheus/client_golang/api/client.go
index 72a01309c..afcf122ef 100644
--- a/vendor/github.com/prometheus/client_golang/api/client.go
+++ b/vendor/github.com/prometheus/client_golang/api/client.go
@@ -79,6 +79,10 @@ type Client interface {
Do(context.Context, *http.Request) (*http.Response, []byte, error)
}
+type CloseIdler interface {
+ CloseIdleConnections()
+}
+
// NewClient returns a new Client.
//
// It is safe to use the returned Client from multiple goroutines.
@@ -118,6 +122,10 @@ func (c *httpClient) URL(ep string, args map[string]string) *url.URL {
return &u
}
+func (c *httpClient) CloseIdleConnections() {
+ c.client.CloseIdleConnections()
+}
+
func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
if ctx != nil {
req = req.WithContext(ctx)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go b/vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go
new file mode 100644
index 000000000..b65896a31
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go
@@ -0,0 +1,50 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "sync/atomic"
+ "time"
+)
+
+// atomicUpdateFloat atomically updates the float64 value pointed to by bits
+// using the provided updateFunc, with an exponential backoff on contention.
+func atomicUpdateFloat(bits *uint64, updateFunc func(float64) float64) {
+ const (
+ // both numbers are derived from empirical observations
+ // documented in this PR: https://github.com/prometheus/client_golang/pull/1661
+ maxBackoff = 320 * time.Millisecond
+ initialBackoff = 10 * time.Millisecond
+ )
+ backoff := initialBackoff
+
+ for {
+ loadedBits := atomic.LoadUint64(bits)
+ oldFloat := math.Float64frombits(loadedBits)
+ newFloat := updateFunc(oldFloat)
+ newBits := math.Float64bits(newFloat)
+
+ if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
+ break
+ } else {
+ // Exponential backoff with sleep and cap to avoid infinite wait
+ time.Sleep(backoff)
+ backoff *= 2
+ if backoff > maxBackoff {
+ backoff = maxBackoff
+ }
+ }
+ }
+}
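`sync/atomic` has no float64 type, so the helper CAS-loops on the IEEE-754 bit pattern and backs off exponentially when the CAS keeps losing. The counter and gauge hunks below show the real call sites; a minimal standalone sketch with an assumed package-level value:

```go
var sumBits uint64 // a float64 stored as raw bits, updated atomically

func addSample(v float64) {
	atomicUpdateFloat(&sumBits, func(old float64) float64 { return old + v })
}
```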
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index 4ce84e7a8..2996aef6a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -134,13 +134,9 @@ func (c *counter) Add(v float64) {
return
}
- for {
- oldBits := atomic.LoadUint64(&c.valBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
- return
- }
- }
+ atomicUpdateFloat(&c.valBits, func(oldVal float64) float64 {
+ return oldVal + v
+ })
}
func (c *counter) AddWithExemplar(v float64, e Labels) {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 68ffe3c24..ad347113c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -189,12 +189,15 @@ func (d *Desc) String() string {
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
)
}
- vlStrings := make([]string, 0, len(d.variableLabels.names))
- for _, vl := range d.variableLabels.names {
- if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
- vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
- } else {
- vlStrings = append(vlStrings, vl)
+ vlStrings := []string{}
+ if d.variableLabels != nil {
+ vlStrings = make([]string, 0, len(d.variableLabels.names))
+ for _, vl := range d.variableLabels.names {
+ if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+ vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+ } else {
+ vlStrings = append(vlStrings, vl)
+ }
}
}
return fmt.Sprintf(
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index dd2eac940..aa1846365 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -120,13 +120,9 @@ func (g *gauge) Dec() {
}
func (g *gauge) Add(val float64) {
- for {
- oldBits := atomic.LoadUint64(&g.valBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
- if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
- return
- }
- }
+ atomicUpdateFloat(&g.valBits, func(oldVal float64) float64 {
+ return oldVal + val
+ })
}
func (g *gauge) Sub(val float64) {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
index 511746417..6b8684731 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -288,7 +288,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
}
func attachOriginalName(desc, origName string) string {
- return fmt.Sprintf("%s Sourced from %s", desc, origName)
+ return fmt.Sprintf("%s Sourced from %s.", desc, origName)
}
// Describe returns all descriptions of the collector.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 519db348a..1a279035b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -14,6 +14,7 @@
package prometheus
import (
+ "errors"
"fmt"
"math"
"runtime"
@@ -28,6 +29,11 @@ import (
"google.golang.org/protobuf/types/known/timestamppb"
)
+const (
+ nativeHistogramSchemaMaximum = 8
+ nativeHistogramSchemaMinimum = -4
+)
+
// nativeHistogramBounds for the frac of observed values. Only relevant for
// schema > 0. The position in the slice is the schema. (0 is never used, just
// here for convenience of using the schema directly as the index.)
@@ -330,11 +336,11 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
-func ExponentialBucketsRange(min, max float64, count int) []float64 {
+func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 {
if count < 1 {
panic("ExponentialBucketsRange count needs a positive count")
}
- if min <= 0 {
+ if minBucket <= 0 {
panic("ExponentialBucketsRange min needs to be greater than 0")
}
@@ -342,12 +348,12 @@ func ExponentialBucketsRange(min, max float64, count int) []float64 {
// max = min*growthFactor^(bucketCount-1)
// We know max/min and highest bucket. Solve for growthFactor.
- growthFactor := math.Pow(max/min, 1.0/float64(count-1))
+ growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1))
// Now that we know growthFactor, solve for each bucket.
buckets := make([]float64, count)
for i := 1; i <= count; i++ {
- buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
+ buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1))
}
return buckets
}
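A quick worked check of the math above: with minBucket=1, maxBucket=1000 and count=4, the growth factor is (1000/1)^(1/3) = 10, giving buckets 1, 10, 100, 1000. A minimal sketch:

package main

import (
	"fmt"
	"math"
)

func main() {
	minBucket, maxBucket, count := 1.0, 1000.0, 4
	growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1)) // 10
	buckets := make([]float64, count)
	for i := 1; i <= count; i++ {
		buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1))
	}
	fmt.Println(buckets) // ≈ [1 10 100 1000], up to floating-point rounding
}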
@@ -858,15 +864,35 @@ func (h *histogram) Write(out *dto.Metric) error {
// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
- // TODO(beorn7): For small numbers of buckets (<30), a linear search is
- // slightly faster than the binary search. If we really care, we could
- // switch from one search strategy to the other depending on the number
- // of buckets.
- //
- // Microbenchmarks (BenchmarkHistogramNoLabels):
- // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
- // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
- // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ n := len(h.upperBounds)
+ if n == 0 {
+ return 0
+ }
+
+ // Early exit: if v is less than or equal to the first upper bound, return 0
+ if v <= h.upperBounds[0] {
+ return 0
+ }
+
+ // Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
+ if v > h.upperBounds[n-1] {
+ return n
+ }
+
+ // For small arrays, use simple linear search
+ // "magic number" 35 is result of tests on couple different (AWS and baremetal) servers
+ // see more details here: https://github.com/prometheus/client_golang/pull/1662
+ if n < 35 {
+ for i, bound := range h.upperBounds {
+ if v <= bound {
+ return i
+ }
+ }
+ // If v is greater than all upper bounds, return len(h.upperBounds)
+ return n
+ }
+
+ // For larger arrays, use stdlib's binary search
return sort.SearchFloat64s(h.upperBounds, v)
}
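The two strategies agree because sort.SearchFloat64s returns the smallest index i with upperBounds[i] >= v, which is exactly what the linear scan computes. A small sketch comparing both on hypothetical bounds:

package main

import (
	"fmt"
	"sort"
)

func main() {
	upperBounds := []float64{0.1, 0.5, 1, 5} // sorted, as histogram bounds are
	for _, v := range []float64{0.05, 0.5, 2, 10} {
		linear := len(upperBounds) // +Inf bucket when no bound matches
		for i, b := range upperBounds {
			if v <= b {
				linear = i
				break
			}
		}
		binary := sort.SearchFloat64s(upperBounds, v)
		fmt.Println(v, linear, binary) // both strategies yield the same index
	}
}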
@@ -1440,9 +1466,9 @@ func pickSchema(bucketFactor float64) int32 {
floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
switch {
case floor <= -8:
- return 8
+ return nativeHistogramSchemaMaximum
case floor >= 4:
- return -4
+ return nativeHistogramSchemaMinimum
default:
return -int32(floor)
}
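The named constants make the clamp read directly against the native-histogram schema range [-4, 8]. A worked example of the default branch: for a bucket factor of 1.1, log2(log2(1.1)) ≈ -2.86, the floor is -3, so pickSchema returns 3, i.e. per-bucket growth of 2^(2^-3) ≈ 1.0905, the finest spacing that does not exceed the requested factor.

package main

import (
	"fmt"
	"math"
)

func main() {
	bucketFactor := 1.1
	floor := math.Floor(math.Log2(math.Log2(bucketFactor))) // floor(-2.86...) = -3
	fmt.Println(-int32(floor))                              // schema 3
	fmt.Println(math.Pow(2, math.Pow(2, -3)))               // growth ≈ 1.0905
}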
@@ -1621,13 +1647,9 @@ func waitForCooldown(count uint64, counts *histogramCounts) {
// atomicAddFloat adds the provided float atomically to another float
// represented by the bit pattern the bits pointer is pointing to.
func atomicAddFloat(bits *uint64, v float64) {
- for {
- loadedBits := atomic.LoadUint64(bits)
- newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
- if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
- break
- }
- }
+ atomicUpdateFloat(bits, func(oldVal float64) float64 {
+ return oldVal + v
+ })
}
// atomicDecUint32 atomically decrements the uint32 p points to. See
@@ -1835,3 +1857,196 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
}
}
+
+type constNativeHistogram struct {
+ desc *Desc
+ dto.Histogram
+ labelPairs []*dto.LabelPair
+}
+
+func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
+ var bucketPopulationSum int64
+ for _, v := range positiveBuckets {
+ bucketPopulationSum += v
+ }
+ for _, v := range negativeBuckets {
+ bucketPopulationSum += v
+ }
+ bucketPopulationSum += int64(zeroBucket)
+
+ // If the sum of observations is NaN, the number of observations must be greater than or equal to the sum of all bucket counts.
+ // Otherwise, the number of observations must be equal to the sum of all bucket counts.
+
+ if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
+ !math.IsNaN(sum) && bucketPopulationSum != int64(count) {
+ return errors.New("the sum of all bucket populations exceeds the count of observations")
+ }
+ return nil
+}
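In other words: the bucket populations (including the zero bucket) must equal the observation count when the sum is a real number, and may be at most the count when the sum is NaN, since NaN observations increment the count but land in no bucket. A minimal mirror of that rule, with invented numbers:

package main

import (
	"fmt"
	"math"
)

// consistent mirrors the validateCount rule for a precomputed population sum.
func consistent(sum float64, count uint64, bucketPopulationSum int64) bool {
	if math.IsNaN(sum) {
		return bucketPopulationSum <= int64(count)
	}
	return bucketPopulationSum == int64(count)
}

func main() {
	fmt.Println(consistent(12.5, 6, 6))       // true: populations match the count
	fmt.Println(consistent(math.NaN(), 7, 6)) // true: one NaN observation
	fmt.Println(consistent(12.5, 7, 6))       // false: would be rejected
}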
+
+// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
+// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// OpenTelemetry Collectors, it is useful as a throw-away metric that is
+// generated on the fly and sent to Prometheus in the Collect method.
+//
+// zeroBucket counts all (positive and negative) observations in the zero
+// bucket (i.e. those with an absolute value less than or equal to the current
+// threshold).
+// positiveBuckets and negativeBuckets are separate maps for positive and
+// negative observations. Each map's value is an int64 counting the
+// observations in that bucket, and each key is the index of the bucket
+// according to the used Schema. Index 0 is for an upper bound of 1 in
+// positive buckets and for a lower bound of -1 in negative buckets.
+// NewConstNativeHistogram returns an error if
+// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid,
+// - the schema passed is not between -4 and 8, or
+// - the sum of counts in all buckets, including the zero bucket, does not equal the count if sum is not NaN (or exceeds the count if sum is NaN).
+//
+// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus.
+func NewConstNativeHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ positiveBuckets, negativeBuckets map[int]int64,
+ zeroBucket uint64,
+ schema int32,
+ zeroThreshold float64,
+ createdTimestamp time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum {
+ return nil, errors.New("invalid native histogram schema")
+ }
+ if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil {
+ return nil, err
+ }
+
+ NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets)
+ PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets)
+ ret := &constNativeHistogram{
+ desc: desc,
+ Histogram: dto.Histogram{
+ CreatedTimestamp: timestamppb.New(createdTimestamp),
+ Schema: &schema,
+ ZeroThreshold: &zeroThreshold,
+ SampleCount: &count,
+ SampleSum: &sum,
+
+ NegativeSpan: NegativeSpan,
+ NegativeDelta: NegativeDelta,
+
+ PositiveSpan: PositiveSpan,
+ PositiveDelta: PositiveDelta,
+
+ ZeroCount: proto.Uint64(zeroBucket),
+ },
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ }
+ if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 {
+ ret.PositiveSpan = []*dto.BucketSpan{{
+ Offset: proto.Int32(0),
+ Length: proto.Uint32(0),
+ }}
+ }
+ return ret, nil
+}
+
+// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where
+// NewConstNativeHistogram would have returned an error.
+func MustNewConstNativeHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ positiveBuckets, negativeBuckets map[int]int64,
+ zeroBucket uint64,
+ nativeHistogramSchema int32,
+ nativeHistogramZeroThreshold float64,
+ createdTimestamp time.Time,
+ labelValues ...string,
+) Metric {
+ nativehistogram, err := NewConstNativeHistogram(desc,
+ count,
+ sum,
+ positiveBuckets,
+ negativeBuckets,
+ zeroBucket,
+ nativeHistogramSchema,
+ nativeHistogramZeroThreshold,
+ createdTimestamp,
+ labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return nativehistogram
+}
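A hedged usage sketch for the new constructor; the metric name and bucket contents are invented for the example. The populations (2+3 positive, 1 in the zero bucket) sum to count=6, as required when sum is not NaN:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("example_duration_seconds", "Example native histogram.", nil, nil)
	m, err := prometheus.NewConstNativeHistogram(
		desc,
		6,                         // count
		12.5,                      // sum (not NaN, so populations must equal count)
		map[int]int64{0: 2, 1: 3}, // positive buckets, keyed by schema index
		nil,                       // no negative buckets
		1,                         // zero bucket
		3,                         // schema, within [-4, 8]
		1e-9,                      // zero threshold
		time.Now(),                // created timestamp
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Desc())
}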
+
+func (h *constNativeHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constNativeHistogram) Write(out *dto.Metric) error {
+ out.Histogram = &h.Histogram
+ out.Label = h.labelPairs
+ return nil
+}
+
+func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) {
+ if len(buckets) == 0 {
+ return nil, nil
+ }
+ var ii []int
+ for k := range buckets {
+ ii = append(ii, k)
+ }
+ sort.Ints(ii)
+
+ var (
+ spans []*dto.BucketSpan
+ deltas []int64
+ prevCount int64
+ nextI int
+ )
+
+ appendDelta := func(count int64) {
+ *spans[len(spans)-1].Length++
+ deltas = append(deltas, count-prevCount)
+ prevCount = count
+ }
+
+ for n, i := range ii {
+ count := buckets[i]
+ // Multiple spans with only small gaps in between are probably
+ // encoded more efficiently as one larger span with a few empty
+ // buckets. Needs some research to find the sweet spot. For now,
+ // we assume that gaps of one or two buckets should not create
+ // a new span.
+ iDelta := int32(i - nextI)
+ if n == 0 || iDelta > 2 {
+ // We have to create a new span, either because we are
+ // at the very beginning, or because we have found a gap
+ // of more than two buckets.
+ spans = append(spans, &dto.BucketSpan{
+ Offset: proto.Int32(iDelta),
+ Length: proto.Uint32(0),
+ })
+ } else {
+ // We have found a small gap (or no gap at all).
+ // Insert empty buckets as needed.
+ for j := int32(0); j < iDelta; j++ {
+ appendDelta(0)
+ }
+ }
+ appendDelta(count)
+ nextI = i + 1
+ }
+ return spans, deltas
+}
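A worked trace of makeBucketsFromMap on the hypothetical input map[int]int64{0: 2, 1: 1, 4: 3}: index 0 starts a span {Offset: 0, Length: 1} with deltas [2]; index 1 has no gap, extending the span to Length 2 with deltas [2, -1]; index 4 leaves a gap of two buckets, which (being at most 2) is filled with two zero-count entries rather than a new span. The result is a single span {Offset: 0, Length: 5} with deltas [2, -1, -1, 0, 3]. A gap wider than two buckets would instead open a new span whose Offset records the distance from the previous span's end.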
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
index a595a2036..8b016355a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -22,17 +22,18 @@ import (
"bytes"
"fmt"
"io"
+ "strconv"
"strings"
)
-func min(a, b int) int {
+func minInt(a, b int) int {
if a < b {
return a
}
return b
}
-func max(a, b int) int {
+func maxInt(a, b int) int {
if a > b {
return a
}
@@ -427,12 +428,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
if codes[0].Tag == 'e' {
c := codes[0]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+ codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2}
}
if codes[len(codes)-1].Tag == 'e' {
c := codes[len(codes)-1]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+ codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)}
}
nn := n + n
groups := [][]OpCode{}
@@ -443,12 +444,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
// there is a large range with no changes.
if c.Tag == 'e' && i2-i1 > nn {
group = append(group, OpCode{
- c.Tag, i1, min(i2, i1+n),
- j1, min(j2, j1+n),
+ c.Tag, i1, minInt(i2, i1+n),
+ j1, minInt(j2, j1+n),
})
groups = append(groups, group)
group = []OpCode{}
- i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n)
}
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
}
@@ -515,7 +516,7 @@ func (m *SequenceMatcher) QuickRatio() float64 {
// is faster to compute than either .Ratio() or .QuickRatio().
func (m *SequenceMatcher) RealQuickRatio() float64 {
la, lb := len(m.a), len(m.b)
- return calculateRatio(min(la, lb), la+lb)
+ return calculateRatio(minInt(la, lb), la+lb)
}
// Convert range to the "ed" format
@@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string {
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 1 {
- return fmt.Sprintf("%d", beginning)
+ return strconv.Itoa(beginning)
}
if length == 0 {
beginning-- // empty ranges begin at line just before the range
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
index 97d17d6cb..f7f97ef92 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -66,7 +66,8 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
name += "_total"
}
- valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
+ // Our current conversion moves to legacy naming, so use legacy validation.
+ valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name)
switch d.Kind {
case metrics.KindUint64:
case metrics.KindFloat64:
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index 9d9b81ab4..592eec3e2 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -108,15 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string {
if name == "" {
return ""
}
- switch {
- case namespace != "" && subsystem != "":
- return strings.Join([]string{namespace, subsystem, name}, "_")
- case namespace != "":
- return strings.Join([]string{namespace, name}, "_")
- case subsystem != "":
- return strings.Join([]string{subsystem, name}, "_")
+
+ sb := strings.Builder{}
+ sb.Grow(len(namespace) + len(subsystem) + len(name) + 2)
+
+ if namespace != "" {
+ sb.WriteString(namespace)
+ sb.WriteString("_")
}
- return name
+
+ if subsystem != "" {
+ sb.WriteString(subsystem)
+ sb.WriteString("_")
+ }
+
+ sb.WriteString(name)
+
+ return sb.String()
}
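The strings.Builder rewrite preserves the old joining behavior: non-empty parts are joined with underscores, and an empty name still yields an empty result. For illustration:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fmt.Println(prometheus.BuildFQName("ns", "sub", "reqs_total")) // ns_sub_reqs_total
	fmt.Println(prometheus.BuildFQName("", "sub", "reqs_total"))   // sub_reqs_total
	fmt.Println(prometheus.BuildFQName("ns", "", "reqs_total"))    // ns_reqs_total
	fmt.Println(prometheus.BuildFQName("", "", "reqs_total"))      // reqs_total
}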
type invalidMetric struct {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index 62a4e7ad9..e7bce8b58 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -23,6 +23,7 @@ import (
type processCollector struct {
collectFn func(chan<- Metric)
+ describeFn func(chan<- *Desc)
pidFn func() (int, error)
reportErrors bool
cpuTotal *Desc
@@ -122,26 +123,23 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
// Set up process metric collection if supported by the runtime.
if canCollectProcess() {
c.collectFn = c.processCollect
+ c.describeFn = c.describe
} else {
- c.collectFn = func(ch chan<- Metric) {
- c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
- }
+ c.collectFn = c.errorCollectFn
+ c.describeFn = c.errorDescribeFn
}
return c
}
-// Describe returns all descriptions of the collector.
-func (c *processCollector) Describe(ch chan<- *Desc) {
- ch <- c.cpuTotal
- ch <- c.openFDs
- ch <- c.maxFDs
- ch <- c.vsize
- ch <- c.maxVsize
- ch <- c.rss
- ch <- c.startTime
- ch <- c.inBytes
- ch <- c.outBytes
+func (c *processCollector) errorCollectFn(ch chan<- Metric) {
+ c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+}
+
+func (c *processCollector) errorDescribeFn(ch chan<- *Desc) {
+ if c.reportErrors {
+ ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform"))
+ }
}
// Collect returns the current state of all metrics of the collector.
@@ -149,6 +147,11 @@ func (c *processCollector) Collect(ch chan<- Metric) {
c.collectFn(ch)
}
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ c.describeFn(ch)
+}
+
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
if !c.reportErrors {
return
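With describeFn mirroring collectFn, Describe on an unsupported platform now emits an invalid descriptor (when error reporting is enabled) instead of advertising metrics that Collect can never produce. Registration itself is unchanged; a minimal sketch:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		ReportErrors: true, // surface "not supported" instead of staying silent
	}))
}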
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.c
new file mode 100644
index 000000000..1554f674d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.c
@@ -0,0 +1,84 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && cgo
+
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#include <mach/mach_vm.h>
+
+// The compiler warns that mach/shared_memory_server.h is deprecated, and to use
+// mach/shared_region.h instead. But that doesn't define
+// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and
+// avoid a warning message when running tests.
+#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U
+#define SHARED_DATA_REGION_SIZE 0x10000000
+#define SHARED_TEXT_REGION_SIZE 0x10000000
+
+
+int get_memory_info(unsigned long long *rss, unsigned long long *vsize)
+{
+ // This is lightly adapted from how ps(1) obtains its memory info.
+ // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109
+
+ kern_return_t error;
+ task_t task = MACH_PORT_NULL;
+ mach_task_basic_info_data_t info;
+ mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT;
+
+ error = task_info(
+ mach_task_self(),
+ MACH_TASK_BASIC_INFO,
+ (task_info_t) &info,
+ &info_count );
+
+ if( error != KERN_SUCCESS )
+ {
+ return error;
+ }
+
+ *rss = info.resident_size;
+ *vsize = info.virtual_size;
+
+ {
+ vm_region_basic_info_data_64_t b_info;
+ mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT;
+ mach_vm_size_t size;
+ mach_port_t object_name;
+
+ /*
+ * try to determine if this task has the split libraries
+ * mapped in... if so, adjust its virtual size down by
+ * the 2 segments that are used for split libraries
+ */
+ info_count = VM_REGION_BASIC_INFO_COUNT_64;
+
+ error = mach_vm_region(
+ mach_task_self(),
+ &address,
+ &size,
+ VM_REGION_BASIC_INFO_64,
+ (vm_region_info_t) &b_info,
+ &info_count,
+ &object_name);
+
+ if (error == KERN_SUCCESS) {
+ if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) &&
+ *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) {
+ *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE);
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go
new file mode 100644
index 000000000..b375c3a77
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go
@@ -0,0 +1,51 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && cgo
+
+package prometheus
+
+/*
+int get_memory_info(unsigned long long *rss, unsigned long long *vs);
+*/
+import "C"
+import "fmt"
+
+func getMemory() (*memoryInfo, error) {
+ var rss, vsize C.ulonglong
+
+ if err := C.get_memory_info(&rss, &vsize); err != 0 {
+ return nil, fmt.Errorf("task_info() failed with 0x%x", int(err))
+ }
+
+ return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.maxVsize
+ ch <- c.startTime
+ ch <- c.rss
+ ch <- c.vsize
+
+ /* these metrics could be collected but are not implemented yet
+ ch <- c.inBytes
+ ch <- c.outBytes
+ */
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
new file mode 100644
index 000000000..50eb860a6
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
@@ -0,0 +1,128 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// notImplementedErr is returned by stub functions that replace cgo functions when
+// cgo isn't available.
+var notImplementedErr = errors.New("not implemented")
+
+type memoryInfo struct {
+ vsize uint64 // Virtual memory size in bytes
+ rss uint64 // Resident memory size in bytes
+}
+
+func canCollectProcess() bool {
+ return true
+}
+
+func getSoftLimit(which int) (uint64, error) {
+ rlimit := syscall.Rlimit{}
+
+ if err := syscall.Getrlimit(which, &rlimit); err != nil {
+ return 0, err
+ }
+
+ return rlimit.Cur, nil
+}
+
+func getOpenFileCount() (float64, error) {
+ // Alternatively, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to
+ // return a list of open fds, but that requires a way to call C APIs. The
+ // benefits, however, include fewer system calls and not failing when at the
+ // open file soft limit.
+
+ if dir, err := os.Open("/dev/fd"); err != nil {
+ return 0.0, err
+ } else {
+ defer dir.Close()
+
+ // Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is
+ // that info not used, but KQUEUE descriptors fail stat(2), which causes
+ // the whole method to fail.
+ if names, err := dir.Readdirnames(0); err != nil {
+ return 0.0, err
+ } else {
+ // Subtract 1 to ignore the open /dev/fd descriptor above.
+ return float64(len(names) - 1), nil
+ }
+ }
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil {
+ if len(procs) == 1 {
+ startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9)
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+ } else {
+ err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs))
+ c.reportError(ch, c.startTime, err)
+ }
+ } else {
+ c.reportError(ch, c.startTime, err)
+ }
+
+ // The proc structure returned by kern.proc.pid above has an Rusage member,
+ // but it is not filled in, so it needs to be fetched by getrusage(2). For
+ // that call, the UTime, STime, and Maxrss members are filled out, but not
+ // Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require
+ // access to the C API to call task_info(TASK_BASIC_INFO).
+ rusage := unix.Rusage{}
+
+ if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil {
+ cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds()
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime)
+ } else {
+ c.reportError(ch, c.cpuTotal, err)
+ }
+
+ if memInfo, err := getMemory(); err == nil {
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
+ } else if !errors.Is(err, notImplementedErr) {
+ // Don't report an error when support is not compiled in.
+ c.reportError(ch, c.rss, err)
+ c.reportError(ch, c.vsize, err)
+ }
+
+ if fds, err := getOpenFileCount(); err == nil {
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds)
+ } else {
+ c.reportError(ch, c.openFDs, err)
+ }
+
+ if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil {
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles))
+ } else {
+ c.reportError(ch, c.maxFDs, err)
+ }
+
+ if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil {
+ ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace))
+ } else {
+ c.reportError(ch, c.maxVsize, err)
+ }
+
+ // TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might
+ // be able to get the per-process network send/receive counts.
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go
new file mode 100644
index 000000000..516504731
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !cgo
+
+package prometheus
+
+func getMemory() (*memoryInfo, error) {
+ return nil, notImplementedErr
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.maxVsize
+ ch <- c.startTime
+
+ /* these metrics could be collected but are not implemented yet
+ ch <- c.rss
+ ch <- c.vsize
+ ch <- c.inBytes
+ ch <- c.outBytes
+ */
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index 14d56d2d0..9f4b130be 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build !windows && !js && !wasip1
-// +build !windows,!js,!wasip1
+//go:build !windows && !js && !wasip1 && !darwin
+// +build !windows,!js,!wasip1,!darwin
package prometheus
@@ -78,3 +78,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
c.reportError(ch, nil, err)
}
}
+
+// describe returns all descriptions of the collector for platforms other than windows, js, wasip1, and darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.maxVsize
+ ch <- c.rss
+ ch <- c.startTime
+ ch <- c.inBytes
+ ch <- c.outBytes
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
deleted file mode 100644
index d8d9a6d7a..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build wasip1
-// +build wasip1
-
-package prometheus
-
-func canCollectProcess() bool {
- return false
-}
-
-func (*processCollector) processCollect(chan<- Metric) {
- // noop on this platform
- return
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1_js.go
similarity index 57%
rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1_js.go
index b1e363d6c..c68f7f851 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1_js.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build js
-// +build js
+//go:build wasip1 || js
+// +build wasip1 js
package prometheus
@@ -21,6 +21,13 @@ func canCollectProcess() bool {
}
func (c *processCollector) processCollect(ch chan<- Metric) {
- // noop on this platform
- return
+ c.errorCollectFn(ch)
+}
+
+// describe returns all descriptions of the collector for wasip1 and js.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+ c.errorDescribeFn(ch)
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
index f973398df..fa474289e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
@@ -79,14 +79,10 @@ func getProcessHandleCount(handle windows.Handle) (uint32, error) {
}
func (c *processCollector) processCollect(ch chan<- Metric) {
- h, err := windows.GetCurrentProcess()
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
+ h := windows.CurrentProcess()
var startTime, exitTime, kernelTime, userTime windows.Filetime
- err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
+ err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
if err != nil {
c.reportError(ch, nil, err)
return
@@ -111,6 +107,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
}
+// describe returns all descriptions of the collector for windows.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.rss
+ ch <- c.startTime
+}
+
func fileTimeToSeconds(ft windows.Filetime) float64 {
return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
}
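A FILETIME is a 64-bit count of 100 ns ticks split across two 32-bit halves, so dividing the recombined value by 1e7 yields seconds. A worked example:

package main

import "fmt"

func main() {
	// 10,000,000 ticks of 100 ns each make exactly one second.
	high, low := uint64(0), uint64(10_000_000)
	fmt.Println(float64(high<<32+low) / 1e7) // 1
}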
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index e598e66e6..28eed2672 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -207,7 +207,13 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
if encodingHeader != string(Identity) {
rsp.Header().Set(contentEncodingHeader, encodingHeader)
}
- enc := expfmt.NewEncoder(w, contentType)
+
+ var enc expfmt.Encoder
+ if opts.EnableOpenMetricsTextCreatedSamples {
+ enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines())
+ } else {
+ enc = expfmt.NewEncoder(w, contentType)
+ }
// handleError handles the error according to opts.ErrorHandling
// and returns true if we have to abort after the handling.
@@ -408,6 +414,21 @@ type HandlerOpts struct {
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
+ // EnableOpenMetricsTextCreatedSamples specifies if this handler should add extra, synthetic
+ // Created Timestamps for counters, histograms and summaries, which for the current
+ // version of OpenMetrics are defined as extra series with the same name and a "_created"
+ // suffix. See the OpenMetrics specification for more details:
+ // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1
+ //
+ // Created timestamps improve the accuracy of reset detection, but as designed
+ // in OpenMetrics 1.0 they also dramatically increase cardinality if the scraper
+ // does not handle the extra series correctly (converting them to created
+ // timestamps instead of ingesting the series as-is). Newer OpenMetrics versions
+ // might improve this situation.
+ //
+ // Prometheus introduced the feature flag 'created-timestamp-zero-ingestion'
+ // in version 2.50.0 to handle this situation.
+ EnableOpenMetricsTextCreatedSamples bool
// ProcessStartTime allows setting the process start time value that will be exposed
// with the "Process-Start-Time-Unix" response header along with the metrics
// payload. This allows callers to have efficient transformations to cumulative
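A hedged sketch of wiring up the new option together with OpenMetrics negotiation; the registry contents and listen address are placeholders:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		EnableOpenMetrics:                   true,
		EnableOpenMetricsTextCreatedSamples: true, // emit *_created series
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}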
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index 1ab0e4796..76a9e12f4 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -243,6 +243,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
s := &summary{
desc: desc,
+ now: opts.now,
objectives: opts.Objectives,
sortedObjectives: make([]float64, 0, len(opts.Objectives)),
@@ -280,6 +281,8 @@ type summary struct {
desc *Desc
+ now func() time.Time
+
objectives map[float64]float64
sortedObjectives []float64
@@ -307,7 +310,7 @@ func (s *summary) Observe(v float64) {
s.bufMtx.Lock()
defer s.bufMtx.Unlock()
- now := time.Now()
+ now := s.now()
if now.After(s.hotBufExpTime) {
s.asyncFlush(now)
}
@@ -326,7 +329,7 @@ func (s *summary) Write(out *dto.Metric) error {
s.bufMtx.Lock()
s.mtx.Lock()
// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
- s.swapBufs(time.Now())
+ s.swapBufs(s.now())
s.bufMtx.Unlock()
s.flushColdBuf()
@@ -468,13 +471,9 @@ func (s *noObjectivesSummary) Observe(v float64) {
n := atomic.AddUint64(&s.countAndHotIdx, 1)
hotCounts := s.counts[n>>63]
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- break
- }
- }
+ atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
+ return oldVal + v
+ })
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
@@ -516,14 +515,13 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
// Finally add all the cold counts to the new hot counts and reset the cold counts.
atomic.AddUint64(&hotCounts.count, count)
atomic.StoreUint64(&coldCounts.count, 0)
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- atomic.StoreUint64(&coldCounts.sumBits, 0)
- break
- }
- }
+
+ // Use atomicUpdateFloat to update hotCounts.sumBits atomically.
+ atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
+ return oldVal + sum.GetSampleSum()
+ })
+ atomic.StoreUint64(&coldCounts.sumBits, 0)
+
return nil
}
diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go
index 50593b6df..6a95da975 100644
--- a/vendor/golang.org/x/oauth2/pkce.go
+++ b/vendor/golang.org/x/oauth2/pkce.go
@@ -21,7 +21,7 @@ const (
//
// A fresh verifier should be generated for each authorization.
// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
-// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange
+// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
// (or Config.DeviceAccessToken).
func GenerateVerifier() string {
// "RECOMMENDED that the output of a suitable random number generator be
@@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string {
}
// S256ChallengeOption derives a PKCE code challenge derived from verifier with
-// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess
+// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth
// only.
func S256ChallengeOption(verifier string) AuthCodeOption {
return challengeOption{
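The doc fix above points the reader at Config.DeviceAuth, the method's actual name. A sketch of the verifier round trip; the endpoints and client ID are placeholders:

package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:    "client-id", // placeholder
		RedirectURL: "https://example.com/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://example.com/auth",
			TokenURL: "https://example.com/token",
		},
	}
	verifier := oauth2.GenerateVerifier()
	// Send the challenge with the authorization request...
	fmt.Println(conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier)))
	// ...and the verifier with the code exchange:
	// tok, err := conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))
}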
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 6d737a9b4..f52dadb59 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# cel.dev/expr v0.20.0
+# cel.dev/expr v0.21.2
## explicit; go 1.21.1
cel.dev/expr
# github.com/antlr4-go/antlr/v4 v4.13.1
@@ -93,8 +93,8 @@ github.com/google/gnostic-models/extensions
github.com/google/gnostic-models/jsonschema
github.com/google/gnostic-models/openapiv2
github.com/google/gnostic-models/openapiv3
-# github.com/google/go-cmp v0.6.0
-## explicit; go 1.13
+# github.com/google/go-cmp v0.7.0
+## explicit; go 1.21
github.com/google/go-cmp/cmp
github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
@@ -128,12 +128,13 @@ github.com/jpillora/backoff
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.11
-## explicit; go 1.21
+# github.com/klauspost/compress v1.18.0
+## explicit; go 1.22
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/internal/cpuinfo
+github.com/klauspost/compress/internal/le
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
@@ -231,12 +232,12 @@ github.com/operator-framework/operator-lib/internal/annotation
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
-# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0
+# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1
## explicit; go 1.23.0
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1
-# github.com/prometheus/client_golang v1.20.5
-## explicit; go 1.20
+# github.com/prometheus/client_golang v1.21.0
+## explicit; go 1.21
github.com/prometheus/client_golang/api
github.com/prometheus/client_golang/api/prometheus/v1
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
@@ -307,8 +308,8 @@ golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.26.0
-## explicit; go 1.18
+# golang.org/x/oauth2 v0.27.0
+## explicit; go 1.23.0
golang.org/x/oauth2
golang.org/x/oauth2/clientcredentials
golang.org/x/oauth2/internal