From 65bc84c77ed39974a33f46d522d0a40a8cce132b Mon Sep 17 00:00:00 2001 From: Alex Cottner Date: Thu, 23 Jan 2025 14:34:12 -0700 Subject: [PATCH 1/4] AUT-199 - replacing statsd with prometheus. First draft. --- Makefile | 5 +- authorize.go | 37 +- autograph.yaml | 5 - docs/configuration.md | 16 - go.mod | 2 - go.sum | 33 - handlers.go | 22 +- internal/mockstatsd/clientinterface.go | 307 ------ main.go | 56 +- main_test.go | 4 - monitor.go | 4 - signer/signer.go | 46 - signer/xpi/cose_test.go | 6 +- signer/xpi/omnija_bench_test.go | 2 +- signer/xpi/recommendation_test.go | 18 +- signer/xpi/x509.go | 20 +- signer/xpi/x509_test.go | 6 +- signer/xpi/xpi.go | 8 +- signer/xpi/xpi_test.go | 32 +- stats.go | 144 ++- stats_test.go | 63 +- .../DataDog/datadog-go/v5/LICENSE.txt | 19 - .../DataDog/datadog-go/v5/statsd/README.md | 4 - .../datadog-go/v5/statsd/aggregator.go | 298 ------ .../DataDog/datadog-go/v5/statsd/buffer.go | 198 ---- .../datadog-go/v5/statsd/buffer_pool.go | 40 - .../v5/statsd/buffered_metric_context.go | 104 -- .../DataDog/datadog-go/v5/statsd/container.go | 19 - .../datadog-go/v5/statsd/container_linux.go | 219 ----- .../datadog-go/v5/statsd/container_stub.go | 17 - .../datadog-go/v5/statsd/error_handler.go | 22 - .../DataDog/datadog-go/v5/statsd/event.go | 75 -- .../DataDog/datadog-go/v5/statsd/fnv1a.go | 39 - .../DataDog/datadog-go/v5/statsd/format.go | 280 ------ .../DataDog/datadog-go/v5/statsd/metrics.go | 268 ------ .../DataDog/datadog-go/v5/statsd/noop.go | 118 --- .../DataDog/datadog-go/v5/statsd/options.go | 414 -------- .../DataDog/datadog-go/v5/statsd/pipe.go | 13 - .../datadog-go/v5/statsd/pipe_windows.go | 81 -- .../DataDog/datadog-go/v5/statsd/sender.go | 145 --- .../datadog-go/v5/statsd/service_check.go | 57 -- .../DataDog/datadog-go/v5/statsd/statsd.go | 907 ------------------ .../datadog-go/v5/statsd/statsd_direct.go | 69 -- .../DataDog/datadog-go/v5/statsd/telemetry.go | 307 ------ .../DataDog/datadog-go/v5/statsd/udp.go | 39 - .../DataDog/datadog-go/v5/statsd/uds.go | 167 ---- .../datadog-go/v5/statsd/uds_windows.go | 15 - .../DataDog/datadog-go/v5/statsd/utils.go | 32 - .../DataDog/datadog-go/v5/statsd/worker.go | 158 --- .../Microsoft/go-winio/.gitattributes | 1 - .../github.com/Microsoft/go-winio/.gitignore | 10 - .../Microsoft/go-winio/.golangci.yml | 147 --- .../github.com/Microsoft/go-winio/CODEOWNERS | 1 - vendor/github.com/Microsoft/go-winio/LICENSE | 22 - .../github.com/Microsoft/go-winio/README.md | 89 -- .../github.com/Microsoft/go-winio/SECURITY.md | 41 - .../github.com/Microsoft/go-winio/backup.go | 287 ------ vendor/github.com/Microsoft/go-winio/doc.go | 22 - vendor/github.com/Microsoft/go-winio/ea.go | 137 --- vendor/github.com/Microsoft/go-winio/file.go | 320 ------ .../github.com/Microsoft/go-winio/fileinfo.go | 106 -- .../github.com/Microsoft/go-winio/hvsock.go | 582 ----------- .../Microsoft/go-winio/internal/fs/doc.go | 2 - .../Microsoft/go-winio/internal/fs/fs.go | 262 ----- .../go-winio/internal/fs/security.go | 12 - .../go-winio/internal/fs/zsyscall_windows.go | 61 -- .../go-winio/internal/socket/rawaddr.go | 20 - .../go-winio/internal/socket/socket.go | 177 ---- .../internal/socket/zsyscall_windows.go | 69 -- .../go-winio/internal/stringbuffer/wstring.go | 132 --- vendor/github.com/Microsoft/go-winio/pipe.go | 586 ----------- .../Microsoft/go-winio/pkg/guid/guid.go | 232 ----- .../go-winio/pkg/guid/guid_nonwindows.go | 16 - .../go-winio/pkg/guid/guid_windows.go | 13 - .../go-winio/pkg/guid/variant_string.go | 27 - 
.../Microsoft/go-winio/privilege.go | 196 ---- .../github.com/Microsoft/go-winio/reparse.go | 131 --- vendor/github.com/Microsoft/go-winio/sd.go | 133 --- .../github.com/Microsoft/go-winio/syscall.go | 5 - .../Microsoft/go-winio/zsyscall_windows.go | 378 -------- .../client_golang/prometheus/testutil/lint.go | 46 + .../prometheus/testutil/promlint/problem.go | 33 + .../prometheus/testutil/promlint/promlint.go | 123 +++ .../testutil/promlint/validation.go | 34 + .../validations/counter_validations.go | 40 + .../validations/duplicate_validations.go | 37 + .../validations/generic_name_validations.go | 101 ++ .../promlint/validations/help_validations.go | 32 + .../validations/histogram_validations.go | 63 ++ .../testutil/promlint/validations/units.go | 118 +++ .../prometheus/testutil/testutil.go | 332 +++++++ vendor/modules.txt | 13 +- 92 files changed, 1160 insertions(+), 8989 deletions(-) delete mode 100644 internal/mockstatsd/clientinterface.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/LICENSE.txt delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/README.md delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/aggregator.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/buffer.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/buffer_pool.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/buffered_metric_context.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/container.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/container_linux.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/container_stub.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/error_handler.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/event.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/fnv1a.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/format.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/metrics.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/noop.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/options.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/pipe.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/pipe_windows.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/sender.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/service_check.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/statsd_direct.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/telemetry.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/udp.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/uds.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/uds_windows.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/utils.go delete mode 100644 vendor/github.com/DataDog/datadog-go/v5/statsd/worker.go delete mode 100644 vendor/github.com/Microsoft/go-winio/.gitattributes delete mode 100644 vendor/github.com/Microsoft/go-winio/.gitignore delete mode 100644 vendor/github.com/Microsoft/go-winio/.golangci.yml delete mode 100644 vendor/github.com/Microsoft/go-winio/CODEOWNERS delete mode 100644 vendor/github.com/Microsoft/go-winio/LICENSE delete mode 100644 
vendor/github.com/Microsoft/go-winio/README.md delete mode 100644 vendor/github.com/Microsoft/go-winio/SECURITY.md delete mode 100644 vendor/github.com/Microsoft/go-winio/backup.go delete mode 100644 vendor/github.com/Microsoft/go-winio/doc.go delete mode 100644 vendor/github.com/Microsoft/go-winio/ea.go delete mode 100644 vendor/github.com/Microsoft/go-winio/file.go delete mode 100644 vendor/github.com/Microsoft/go-winio/fileinfo.go delete mode 100644 vendor/github.com/Microsoft/go-winio/hvsock.go delete mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/doc.go delete mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/fs.go delete mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/security.go delete mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go delete mode 100644 vendor/github.com/Microsoft/go-winio/internal/socket/socket.go delete mode 100644 vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pipe.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go delete mode 100644 vendor/github.com/Microsoft/go-winio/privilege.go delete mode 100644 vendor/github.com/Microsoft/go-winio/reparse.go delete mode 100644 vendor/github.com/Microsoft/go-winio/sd.go delete mode 100644 vendor/github.com/Microsoft/go-winio/syscall.go delete mode 100644 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go diff --git a/Makefile b/Makefile index 94c185974..952c02db5 100644 --- a/Makefile +++ b/Makefile @@ -92,11 +92,8 @@ build: generate integration-test: ./bin/run_integration_tests.sh -dummy-statsd: - nc -kluvw 0 localhost 8125 - .SUFFIXES: # Delete the default suffixes -.PHONY: all dummy-statsd test generate vendor integration-test check-no-crypto11-in-signers test-in-docker +.PHONY: all test generate vendor 
integration-test check-no-crypto11-in-signers test-in-docker # build the docker image, and run it locally with our source code mounted under /app/src/autograph # run as root to allow for changes, package installs, etc diff --git a/authorize.go b/authorize.go index a35b99283..18707bae9 100644 --- a/authorize.go +++ b/authorize.go @@ -13,7 +13,7 @@ import ( "regexp" "time" - log "github.com/sirupsen/logrus" + "github.com/prometheus/client_golang/prometheus" "go.mozilla.org/hawk" ) @@ -47,19 +47,17 @@ func (a *autographer) authorizeHeader(r *http.Request) (auth *hawk.Auth, userid return nil, "", fmt.Errorf("missing Authorization header") } auth, err = hawk.ParseRequestHeader(r.Header.Get("Authorization")) - sendStatsErr := a.stats.Timing("hawk.header_parsed", time.Since(getRequestStartTime(r)), nil, 1.0) - if sendStatsErr != nil { - log.Warnf("Error sending hawk.header_parsed: %s", sendStatsErr) - } + signerRequestsTiming.With(prometheus.Labels{ + "step": "hawk_header_parsed", + }).Observe(float64(time.Since(getRequestStartTime(r)).Milliseconds())) if err != nil { return nil, "", err } userid = auth.Credentials.ID auth, err = hawk.NewAuthFromRequest(r, a.lookupCred(userid), a.lookupNonce) - sendStatsErr = a.stats.Timing("hawk.auth_created", time.Since(getRequestStartTime(r)), nil, 1.0) - if sendStatsErr != nil { - log.Warnf("Error sending hawk.auth_created: %s", sendStatsErr) - } + signerRequestsTiming.With(prometheus.Labels{ + "step": "hawk_auth_created", + }).Observe(float64(time.Since(getRequestStartTime(r)).Milliseconds())) if err != nil { return nil, "", err } @@ -69,15 +67,13 @@ func (a *autographer) authorizeHeader(r *http.Request) (auth *hawk.Auth, userid } hawk.MaxTimestampSkew = a.hawkMaxTimestampSkew err = auth.Valid() - sendStatsErr = a.stats.Timing("hawk.validated", time.Since(getRequestStartTime(r)), nil, 1.0) - if sendStatsErr != nil { - log.Warnf("Error sending hawk.validated: %s", sendStatsErr) - } + signerRequestsTiming.With(prometheus.Labels{ + "step": "hawk_validated", + }).Observe(float64(time.Since(getRequestStartTime(r)).Milliseconds())) skew := abs(auth.ActualTimestamp.Sub(auth.Timestamp)) - sendStatsErr = a.stats.Timing("hawk.timestamp_skew", skew, nil, 1.0) - if sendStatsErr != nil { - log.Warnf("Error sending hawk.timestamp_skew: %s", sendStatsErr) - } + signerRequestsTiming.With(prometheus.Labels{ + "step": "hawk_timestamp_skew", + }).Observe(float64(skew.Milliseconds())) if err != nil { return nil, "", err @@ -90,10 +86,9 @@ func (a *autographer) authorizeHeader(r *http.Request) (auth *hawk.Auth, userid func (a *autographer) authorizeBody(auth *hawk.Auth, r *http.Request, body []byte) (err error) { payloadhash := auth.PayloadHash(r.Header.Get("Content-Type")) payloadhash.Write(body) - sendStatsErr := a.stats.Timing("hawk.payload_hashed", time.Since(getRequestStartTime(r)), nil, 1.0) - if sendStatsErr != nil { - log.Warnf("Error sending hawk.payload_hashed: %s", sendStatsErr) - } + signerRequestsTiming.With(prometheus.Labels{ + "step": "hawk_payload_hashed", + }).Observe(float64(time.Since(getRequestStartTime(r)).Milliseconds())) if !auth.ValidHash(payloadhash) { return fmt.Errorf("payload validation failed") } diff --git a/autograph.yaml b/autograph.yaml index 4d54dd3be..8d7454812 100644 --- a/autograph.yaml +++ b/autograph.yaml @@ -6,11 +6,6 @@ server: readtimeout: 60s writetimeout: 60s -statsd: - addr: "127.0.0.1:8125" - namespace: "autograph." 
- buflen: 1 - debugserver: listen: "0.0.0.0:2112" diff --git a/docs/configuration.md b/docs/configuration.md index 0bca72f35..b9acbe8ae 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -21,22 +21,6 @@ server: Use flag `-p` to provide an alternate port and override any port specified in the config. -## Statsd - -Optionally, configure statsd with: - -- *addr* a UDP host and port to send statsd stats to -- *namespace* a statsd prefix -- *buflen* the number of statsd commands to buffer before sending or - 100ms elapses in which case the buffer is flushed - -``` yaml -statsd: - addr: "127.0.0.1:8125" - namespace: "autograph." - buflen: 1 -``` - ## Database Optionally, configure postgres using the sample below. Use the schema in diff --git a/go.mod b/go.mod index 51f517261..d9ac4b9bc 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/mozilla-services/autograph go 1.23.4 require ( - github.com/DataDog/datadog-go/v5 v5.6.0 github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go-v2 v1.33.0 github.com/aws/aws-sdk-go-v2/config v1.29.1 @@ -48,7 +47,6 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 // indirect - github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.1.3 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.54 // indirect diff --git a/go.sum b/go.sum index 1a90e07e7..5ff752ec3 100644 --- a/go.sum +++ b/go.sum @@ -47,8 +47,6 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mo github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ= github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= -github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw= -github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 h1:o90wcURuxekmXrtxmYWTyNla0+ZEHhud6DI1ZTxd1vI= @@ -57,7 +55,6 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.49.0/go.mod h1:l2fIqmwB+FKSfvn3bAD/0i+AXAxhIZjTK2svT/mgUXs= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 h1:GYUJLfvd++4DMuMhCFLgLXvFwofIxh/qOwoGuS/LTew= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0/go.mod h1:wRbFgBQUVm1YXrvWKofAEmq9HNJTDphbAaJSSX01KUI= -github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Nvveen/Gotty 
v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -169,7 +166,6 @@ github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -283,20 +279,16 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= @@ -313,7 +305,6 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/youtube/vitess v2.1.1+incompatible h1:SE+P7DNX/jw5RHFs5CHRhZQjq402EJFCD33JhzQMdDw= github.com/youtube/vitess v2.1.1+incompatible/go.mod h1:hpMim5/30F1r+0P8GGtB29d0gWHr0IZ5unS+CG0zMx8= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.mozilla.org/cose v0.0.0-20200221144611-2ea72a6b3de3 h1:+06j/3Sl6VUyvzrDor4UrtxuCQQ67sUpUtKFm5KqYKU= go.mozilla.org/cose v0.0.0-20200221144611-2ea72a6b3de3/go.mod h1:NitxzJTubT7Y6B94irV0gYJeNT224l4AOv10qjgilLU= go.mozilla.org/hawk v0.0.0-20190327210923-a483e4a7047e h1:EHC+jNgDT61H9gWumkg+1bc5/+lYAhynV+GhgTrtUtc= @@ -348,50 +339,26 @@ go.opentelemetry.io/otel/trace 
v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qq go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= 
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.214.0 h1:h2Gkq07OYi6kusGOaT/9rnNljuXmqPnaig7WGPmKbwA= google.golang.org/api v0.214.0/go.mod h1:bYPpLG8AyeMWwDU6NXoB00xC0DFkikVvd5MfwoxjLqE= google.golang.org/genproto v0.0.0-20241223144023-3abc09e42ca8 h1:e26eS1K69yxjjNNHYqjN49y95kcaQLJ3TL5h68dcA1E= diff --git a/handlers.go b/handlers.go index 4ce9fee43..c995f58d0 100644 --- a/handlers.go +++ b/handlers.go @@ -105,10 +105,9 @@ func (a *autographer) handleSignature(w http.ResponseWriter, r *http.Request) { starttime := getRequestStartTime(r) auth, userid, err := a.authorizeHeader(r) if err != nil { - sendStatsErr := a.stats.Timing("hawk.authorize_header_failed", time.Since(starttime), nil, 1.0) - if sendStatsErr != nil { - log.Warnf("Error sending hawk.authorize_header_failed: %s", sendStatsErr) - } + signerRequestsTiming.With(prometheus.Labels{ + "step": "hawk_authorize_header_failed", + }).Observe(float64(time.Since(starttime).Milliseconds())) httpError(w, r, http.StatusUnauthorized, "authorization verification failed: %v", err) return } @@ -132,20 +131,18 @@ func (a *autographer) handleSignature(w http.ResponseWriter, r *http.Request) { return } err = a.authorizeBody(auth, r, body) - sendStatsErr := a.stats.Timing("authorize_finished", time.Since(starttime), nil, 1.0) - if sendStatsErr != nil { - log.Warnf("Error sending authorize_finished: %s", sendStatsErr) - } + signerRequestsTiming.With(prometheus.Labels{ + "step": "authorize_finished", + }).Observe(float64(time.Since(starttime).Milliseconds())) if err != nil { httpError(w, r, http.StatusUnauthorized, "authorization verification failed: %v", err) return } var sigreqs []formats.SignatureRequest err = json.Unmarshal(body, &sigreqs) - sendStatsErr = a.stats.Timing("body_unmarshaled", time.Since(starttime), nil, 1.0) - if sendStatsErr != nil { - log.Warnf("Error sending body_unmarshaled: %s", sendStatsErr) - } + signerRequestsTiming.With(prometheus.Labels{ + "step": "body_unmarshaled", + }).Observe(float64(time.Since(starttime).Milliseconds())) if err != nil { httpError(w, r, http.StatusBadRequest, "failed to parse request body: %v", err) return @@ -218,7 +215,6 @@ func (a *autographer) handleSignature(w http.ResponseWriter, r *http.Request) { return } requestedSignerConfig := requestedSigner.Config() - a.stats.Incr("signer.requests", []string{"keyid:" + requestedSignerConfig.ID, "user:" + userid, usedDefaultSignerTag(sigreq)}, 1.0) signerRequestsCounter.With(prometheus.Labels{ "keyid": requestedSignerConfig.ID, "user": userid, diff --git a/internal/mockstatsd/clientinterface.go b/internal/mockstatsd/clientinterface.go deleted file mode 100644 index 904033173..000000000 --- 
a/internal/mockstatsd/clientinterface.go +++ /dev/null @@ -1,307 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go -// -// Generated by this command: -// -// mockgen -package mockstatsd -source vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go -destination internal/mockstatsd/clientinterface.go -imports github.com/mozilla-services/autograph/vendor/github.com/DataDog/datadog-go/v5/statsd=github.com/DataDog/datadog-go/v5/statsd ClientInterface -// - -// Package mockstatsd is a generated GoMock package. -package mockstatsd - -import ( - reflect "reflect" - time "time" - - statsd "github.com/DataDog/datadog-go/v5/statsd" - gomock "go.uber.org/mock/gomock" -) - -// MockClientInterface is a mock of ClientInterface interface. -type MockClientInterface struct { - ctrl *gomock.Controller - recorder *MockClientInterfaceMockRecorder -} - -// MockClientInterfaceMockRecorder is the mock recorder for MockClientInterface. -type MockClientInterfaceMockRecorder struct { - mock *MockClientInterface -} - -// NewMockClientInterface creates a new mock instance. -func NewMockClientInterface(ctrl *gomock.Controller) *MockClientInterface { - mock := &MockClientInterface{ctrl: ctrl} - mock.recorder = &MockClientInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockClientInterface) EXPECT() *MockClientInterfaceMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockClientInterface) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockClientInterfaceMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClientInterface)(nil).Close)) -} - -// Count mocks base method. -func (m *MockClientInterface) Count(name string, value int64, tags []string, rate float64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Count", name, value, tags, rate) - ret0, _ := ret[0].(error) - return ret0 -} - -// Count indicates an expected call of Count. -func (mr *MockClientInterfaceMockRecorder) Count(name, value, tags, rate any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Count", reflect.TypeOf((*MockClientInterface)(nil).Count), name, value, tags, rate) -} - -// CountWithTimestamp mocks base method. -func (m *MockClientInterface) CountWithTimestamp(name string, value int64, tags []string, rate float64, timestamp time.Time) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CountWithTimestamp", name, value, tags, rate, timestamp) - ret0, _ := ret[0].(error) - return ret0 -} - -// CountWithTimestamp indicates an expected call of CountWithTimestamp. -func (mr *MockClientInterfaceMockRecorder) CountWithTimestamp(name, value, tags, rate, timestamp any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountWithTimestamp", reflect.TypeOf((*MockClientInterface)(nil).CountWithTimestamp), name, value, tags, rate, timestamp) -} - -// Decr mocks base method. -func (m *MockClientInterface) Decr(name string, tags []string, rate float64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Decr", name, tags, rate) - ret0, _ := ret[0].(error) - return ret0 -} - -// Decr indicates an expected call of Decr. 
-func (mr *MockClientInterfaceMockRecorder) Decr(name, tags, rate any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decr", reflect.TypeOf((*MockClientInterface)(nil).Decr), name, tags, rate) -} - -// Distribution mocks base method. -func (m *MockClientInterface) Distribution(name string, value float64, tags []string, rate float64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Distribution", name, value, tags, rate) - ret0, _ := ret[0].(error) - return ret0 -} - -// Distribution indicates an expected call of Distribution. -func (mr *MockClientInterfaceMockRecorder) Distribution(name, value, tags, rate any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Distribution", reflect.TypeOf((*MockClientInterface)(nil).Distribution), name, value, tags, rate) -} - -// Event mocks base method. -func (m *MockClientInterface) Event(e *statsd.Event) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Event", e) - ret0, _ := ret[0].(error) - return ret0 -} - -// Event indicates an expected call of Event. -func (mr *MockClientInterfaceMockRecorder) Event(e any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Event", reflect.TypeOf((*MockClientInterface)(nil).Event), e) -} - -// Flush mocks base method. -func (m *MockClientInterface) Flush() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Flush") - ret0, _ := ret[0].(error) - return ret0 -} - -// Flush indicates an expected call of Flush. -func (mr *MockClientInterfaceMockRecorder) Flush() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MockClientInterface)(nil).Flush)) -} - -// Gauge mocks base method. -func (m *MockClientInterface) Gauge(name string, value float64, tags []string, rate float64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Gauge", name, value, tags, rate) - ret0, _ := ret[0].(error) - return ret0 -} - -// Gauge indicates an expected call of Gauge. -func (mr *MockClientInterfaceMockRecorder) Gauge(name, value, tags, rate any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Gauge", reflect.TypeOf((*MockClientInterface)(nil).Gauge), name, value, tags, rate) -} - -// GaugeWithTimestamp mocks base method. -func (m *MockClientInterface) GaugeWithTimestamp(name string, value float64, tags []string, rate float64, timestamp time.Time) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GaugeWithTimestamp", name, value, tags, rate, timestamp) - ret0, _ := ret[0].(error) - return ret0 -} - -// GaugeWithTimestamp indicates an expected call of GaugeWithTimestamp. -func (mr *MockClientInterfaceMockRecorder) GaugeWithTimestamp(name, value, tags, rate, timestamp any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GaugeWithTimestamp", reflect.TypeOf((*MockClientInterface)(nil).GaugeWithTimestamp), name, value, tags, rate, timestamp) -} - -// GetTelemetry mocks base method. -func (m *MockClientInterface) GetTelemetry() statsd.Telemetry { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTelemetry") - ret0, _ := ret[0].(statsd.Telemetry) - return ret0 -} - -// GetTelemetry indicates an expected call of GetTelemetry. 
-func (mr *MockClientInterfaceMockRecorder) GetTelemetry() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTelemetry", reflect.TypeOf((*MockClientInterface)(nil).GetTelemetry)) -} - -// Histogram mocks base method. -func (m *MockClientInterface) Histogram(name string, value float64, tags []string, rate float64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Histogram", name, value, tags, rate) - ret0, _ := ret[0].(error) - return ret0 -} - -// Histogram indicates an expected call of Histogram. -func (mr *MockClientInterfaceMockRecorder) Histogram(name, value, tags, rate any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Histogram", reflect.TypeOf((*MockClientInterface)(nil).Histogram), name, value, tags, rate) -} - -// Incr mocks base method. -func (m *MockClientInterface) Incr(name string, tags []string, rate float64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Incr", name, tags, rate) - ret0, _ := ret[0].(error) - return ret0 -} - -// Incr indicates an expected call of Incr. -func (mr *MockClientInterfaceMockRecorder) Incr(name, tags, rate any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Incr", reflect.TypeOf((*MockClientInterface)(nil).Incr), name, tags, rate) -} - -// IsClosed mocks base method. -func (m *MockClientInterface) IsClosed() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsClosed") - ret0, _ := ret[0].(bool) - return ret0 -} - -// IsClosed indicates an expected call of IsClosed. -func (mr *MockClientInterfaceMockRecorder) IsClosed() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsClosed", reflect.TypeOf((*MockClientInterface)(nil).IsClosed)) -} - -// ServiceCheck mocks base method. -func (m *MockClientInterface) ServiceCheck(sc *statsd.ServiceCheck) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ServiceCheck", sc) - ret0, _ := ret[0].(error) - return ret0 -} - -// ServiceCheck indicates an expected call of ServiceCheck. -func (mr *MockClientInterfaceMockRecorder) ServiceCheck(sc any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceCheck", reflect.TypeOf((*MockClientInterface)(nil).ServiceCheck), sc) -} - -// Set mocks base method. -func (m *MockClientInterface) Set(name, value string, tags []string, rate float64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Set", name, value, tags, rate) - ret0, _ := ret[0].(error) - return ret0 -} - -// Set indicates an expected call of Set. -func (mr *MockClientInterfaceMockRecorder) Set(name, value, tags, rate any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockClientInterface)(nil).Set), name, value, tags, rate) -} - -// SimpleEvent mocks base method. -func (m *MockClientInterface) SimpleEvent(title, text string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SimpleEvent", title, text) - ret0, _ := ret[0].(error) - return ret0 -} - -// SimpleEvent indicates an expected call of SimpleEvent. -func (mr *MockClientInterfaceMockRecorder) SimpleEvent(title, text any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimpleEvent", reflect.TypeOf((*MockClientInterface)(nil).SimpleEvent), title, text) -} - -// SimpleServiceCheck mocks base method. 
-func (m *MockClientInterface) SimpleServiceCheck(name string, status statsd.ServiceCheckStatus) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SimpleServiceCheck", name, status) - ret0, _ := ret[0].(error) - return ret0 -} - -// SimpleServiceCheck indicates an expected call of SimpleServiceCheck. -func (mr *MockClientInterfaceMockRecorder) SimpleServiceCheck(name, status any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimpleServiceCheck", reflect.TypeOf((*MockClientInterface)(nil).SimpleServiceCheck), name, status) -} - -// TimeInMilliseconds mocks base method. -func (m *MockClientInterface) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TimeInMilliseconds", name, value, tags, rate) - ret0, _ := ret[0].(error) - return ret0 -} - -// TimeInMilliseconds indicates an expected call of TimeInMilliseconds. -func (mr *MockClientInterfaceMockRecorder) TimeInMilliseconds(name, value, tags, rate any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeInMilliseconds", reflect.TypeOf((*MockClientInterface)(nil).TimeInMilliseconds), name, value, tags, rate) -} - -// Timing mocks base method. -func (m *MockClientInterface) Timing(name string, value time.Duration, tags []string, rate float64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Timing", name, value, tags, rate) - ret0, _ := ret[0].(error) - return ret0 -} - -// Timing indicates an expected call of Timing. -func (mr *MockClientInterfaceMockRecorder) Timing(name, value, tags, rate any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timing", reflect.TypeOf((*MockClientInterface)(nil).Timing), name, value, tags, rate) -} diff --git a/main.go b/main.go index 381151595..f94eb926d 100644 --- a/main.go +++ b/main.go @@ -41,7 +41,6 @@ import ( sops "github.com/getsops/sops/v3" "github.com/getsops/sops/v3/decrypt" - "github.com/DataDog/datadog-go/v5/statsd" "github.com/mozilla-services/autograph/crypto11" "github.com/prometheus/client_golang/prometheus/promhttp" ) @@ -55,11 +54,6 @@ type configuration struct { ReadTimeout time.Duration WriteTimeout time.Duration } - Statsd struct { - Addr string - Namespace string - Buflen int - } // DebugServer are the settings for the control plane HTTP server where // metrics are exposed for collection and some limited utitilites can live. @@ -85,7 +79,6 @@ type debugServerConfig struct { // with all signers and permissions configured type autographer struct { db *database.Handler - stats statsd.ClientInterface nonces *lru.Cache debug bool heartbeatConf *heartbeatConfig @@ -182,11 +175,6 @@ func run(conf configuration, listen string, debug bool) { } } - err = ag.addStats(conf) - if err != nil { - log.Fatal(err) - } - err = ag.addSigners(conf.Signers) if err != nil { log.Fatal(err) @@ -218,18 +206,16 @@ func run(conf configuration, listen string, debug bool) { // Initialize a monitor. 
monitor := newMonitor(ag, conf.MonitorInterval) - stats := ag.stats - router := mux.NewRouter().StrictSlash(true) - router.HandleFunc("/__heartbeat__", statsMiddleware(ag.handleHeartbeat, "http.nonapi.heartbeat", stats)).Methods("GET") - router.HandleFunc("/__lbheartbeat__", statsMiddleware(handleLBHeartbeat, "http.nonapi.lbheartbeat", stats)).Methods("GET") - router.HandleFunc("/__version__", statsMiddleware(handleVersion, "http.nonapi.version", stats)).Methods("GET") - router.HandleFunc("/__monitor__", statsMiddleware(monitor.handleMonitor, "http.nonapi.monitor", stats)).Methods("GET") - router.HandleFunc("/sign/files", apiStatsMiddleware(ag.handleSignature, "http.api.sign/files", stats)).Methods("POST") - router.HandleFunc("/sign/file", apiStatsMiddleware(ag.handleSignature, "http.api.sign/file", stats)).Methods("POST") - router.HandleFunc("/sign/data", apiStatsMiddleware(ag.handleSignature, "http.api.sign/data", stats)).Methods("POST") - router.HandleFunc("/sign/hash", apiStatsMiddleware(ag.handleSignature, "http.api.sign/hash", stats)).Methods("POST") - router.HandleFunc("/auths/{auth_id:[a-zA-Z0-9-_]{1,255}}/keyids", apiStatsMiddleware(ag.handleGetAuthKeyIDs, "http.api.getauthkeyids", stats)).Methods("GET") + router.HandleFunc("/__heartbeat__", statsMiddleware(ag.handleHeartbeat, "http.nonapi.heartbeat")).Methods("GET") + router.HandleFunc("/__lbheartbeat__", statsMiddleware(handleLBHeartbeat, "http.nonapi.lbheartbeat")).Methods("GET") + router.HandleFunc("/__version__", statsMiddleware(handleVersion, "http.nonapi.version")).Methods("GET") + router.HandleFunc("/__monitor__", statsMiddleware(monitor.handleMonitor, "http.nonapi.monitor")).Methods("GET") + router.HandleFunc("/sign/files", apiStatsMiddleware(ag.handleSignature, "http.api.sign/files")).Methods("POST") + router.HandleFunc("/sign/file", apiStatsMiddleware(ag.handleSignature, "http.api.sign/file")).Methods("POST") + router.HandleFunc("/sign/data", apiStatsMiddleware(ag.handleSignature, "http.api.sign/data")).Methods("POST") + router.HandleFunc("/sign/hash", apiStatsMiddleware(ag.handleSignature, "http.api.sign/hash")).Methods("POST") + router.HandleFunc("/auths/{auth_id:[a-zA-Z0-9-_]{1,255}}/keyids", apiStatsMiddleware(ag.handleGetAuthKeyIDs, "http.api.getauthkeyids")).Methods("GET") // For each signer with a local chain upload location (eg: using the file // scheme) create an handler to serve that directory at the path /x5u/keyid/ @@ -244,13 +230,6 @@ func run(conf configuration, listen string, debug bool) { router.PathPrefix(prefix).Handler(http.StripPrefix(prefix, http.FileServer(http.Dir(parsedURL.Path)))) } - go func() { - time.Sleep(5 * time.Minute) - ag.stats.Incr(foobarTestCounterName, []string{"statsd:yes"}, 1) - foobarTestCounter.Inc() - promOnlyFoobarTestCounterName.Inc() - }() - if conf.DebugServer.Listen != "" { log.Infof("starting debug server on %s", conf.DebugServer.Listen) go func() { @@ -350,7 +329,6 @@ func newAutographer(cachesize int) (a *autographer) { a.authBackend = newInMemoryAuthBackend() a.nonces, err = lru.New(cachesize) a.exit = make(chan interface{}) - a.stats = &statsd.NoOpClient{} if err != nil { log.Fatal(err) } @@ -493,14 +471,14 @@ func (a *autographer) addSigners(signerConfs []signer.Configuration) error { } sids[signerConf.ID] = true var ( - s signer.Signer - statsClient *signer.StatsClient - err error + s signer.Signer + // statsClient *signer.StatsClient + err error ) - statsClient, err = signer.NewStatsClient(signerConf, a.stats) - if statsClient == nil || err != nil { - return 
fmt.Errorf("failed to add signer stats client %q or got back nil statsClient: %w", signerConf.ID, err) - } + // statsClient, err = signer.NewStatsClient(signerConf) + // if statsClient == nil || err != nil { + // return fmt.Errorf("failed to add signer stats client %q or got back nil statsClient: %w", signerConf.ID, err) + // } // give the database handler to the signer configuration if a.db != nil { signerConf.DB = a.db @@ -517,7 +495,7 @@ func (a *autographer) addSigners(signerConfs []signer.Configuration) error { return fmt.Errorf("failed to add signer %q: %w", signerConf.ID, err) } case xpi.Type: - s, err = xpi.New(signerConf, statsClient) + s, err = xpi.New(signerConf) if err != nil { return fmt.Errorf("failed to add signer %q: %w", signerConf.ID, err) } diff --git a/main_test.go b/main_test.go index ab4a3e2a9..72ee79b36 100644 --- a/main_test.go +++ b/main_test.go @@ -36,10 +36,6 @@ func newTestAutographer(t *testing.T) (*autographer, configuration) { if err != nil { log.Fatal(err) } - err = ag.addStats(conf) - if err != nil { - log.Fatal(err) - } if conf.HawkTimestampValidity != "" { ag.hawkMaxTimestampSkew, err = time.ParseDuration(conf.HawkTimestampValidity) if err != nil { diff --git a/monitor.go b/monitor.go index 659904716..c1daef697 100644 --- a/monitor.go +++ b/monitor.go @@ -7,7 +7,6 @@ import ( "sync" "time" - "github.com/DataDog/datadog-go/v5/statsd" "github.com/mozilla-services/autograph/formats" "github.com/mozilla-services/autograph/signer" log "github.com/sirupsen/logrus" @@ -37,8 +36,6 @@ type monitor struct { // Closed on exit of the autographer instance. exit chan interface{} - - stats statsd.ClientInterface } // The monitor loop, should run in a separate goroutine. @@ -143,7 +140,6 @@ func newMonitor(ag *autographer, duration time.Duration) *monitor { m.initialized = make(chan interface{}) m.exit = ag.exit m.debug = ag.debug - m.stats = ag.stats go m.start(duration) diff --git a/signer/signer.go b/signer/signer.go index e2067c040..a03554bf1 100644 --- a/signer/signer.go +++ b/signer/signer.go @@ -25,10 +25,7 @@ import ( "github.com/mozilla-services/autograph/database" "github.com/mozilla-services/autograph/formats" - "github.com/DataDog/datadog-go/v5/statsd" "github.com/mozilla-services/autograph/crypto11" - - log "github.com/sirupsen/logrus" ) // IDFormat is a regex for the format IDs must follow @@ -445,46 +442,3 @@ func (cfg *Configuration) MakeKey(keyTpl interface{}, keyName string) (priv cryp return nil, nil, fmt.Errorf("making key of type %T is not supported", keyTpl) } } - -// StatsClient is a helper for sending statsd stats with the relevant -// tags for the signer and error handling -type StatsClient struct { - // signerTags is the - signerTags []string - - // stats is the statsd client for reporting metrics - stats statsd.ClientInterface -} - -// NewStatsClient makes a new stats client -func NewStatsClient(signerConfig Configuration, stats statsd.ClientInterface) (*StatsClient, error) { - return &StatsClient{ - stats: stats, - signerTags: []string{ - fmt.Sprintf("autograph-signer-id:%s", signerConfig.ID), - fmt.Sprintf("autograph-signer-type:%s", signerConfig.Type), - fmt.Sprintf("autograph-signer-mode:%s", signerConfig.Mode), - }, - }, nil -} - -// SendGauge checks for a statsd client and when one is present sends -// a statsd gauge with the given name, int value cast to float64, tags -// for the signer, and sampling rate of 1 -func (s *StatsClient) SendGauge(name string, value int) { - err := s.stats.Gauge(name, float64(value), s.signerTags, 1) - if err 
!= nil { - log.Warnf("Error sending gauge %s: %s", name, err) - } -} - -// SendHistogram checks for a statsd client and when one is present -// sends a statsd histogram with the given name, time.Duration value -// converted to ms, cast to float64, tags for the signer, and sampling -// rate of 1 -func (s *StatsClient) SendHistogram(name string, value time.Duration) { - err := s.stats.Histogram(name, float64(value/time.Millisecond), s.signerTags, 1) - if err != nil { - log.Warnf("Error sending histogram %s: %s", name, err) - } -} diff --git a/signer/xpi/cose_test.go b/signer/xpi/cose_test.go index 68658a419..6ead4e2f1 100644 --- a/signer/xpi/cose_test.go +++ b/signer/xpi/cose_test.go @@ -76,7 +76,7 @@ func TestGenerateCOSEKeyPair(t *testing.T) { // returns an initialized XPI signer initSigner := func(t *testing.T) *XPISigner { testcase := validSignerConfigs[0] - s, err := New(testcase, nil) + s, err := New(testcase) if err != nil { t.Fatalf("signer initialization failed with: %v", err) } @@ -507,7 +507,7 @@ func TestVerifyCOSESignaturesErrs(t *testing.T) { t.Fatalf("error unmarshaling invalidSigBytes %q", err) } - s, err := New(validSignerConfigs[0], nil) + s, err := New(validSignerConfigs[0]) if err != nil { t.Fatalf("signer initialization failed with: %q", err) } @@ -845,7 +845,7 @@ func TestVerifyCOSESignaturesErrs(t *testing.T) { func TestIssueCOSESignatureErrs(t *testing.T) { t.Parallel() - signer, err := New(validSignerConfigs[0], nil) + signer, err := New(validSignerConfigs[0]) if err != nil { t.Fatalf("signer initialization failed with: %v", err) } diff --git a/signer/xpi/omnija_bench_test.go b/signer/xpi/omnija_bench_test.go index 71db1204c..1bd84d153 100644 --- a/signer/xpi/omnija_bench_test.go +++ b/signer/xpi/omnija_bench_test.go @@ -8,7 +8,7 @@ func BenchmarkResignOmnija(b *testing.B) { // initialize a system addon signer with an RSA key testcase := validSignerConfigs[1] - s, err := New(testcase, nil) + s, err := New(testcase) if err != nil { b.Fatalf("signer initialization failed with: %v", err) } diff --git a/signer/xpi/recommendation_test.go b/signer/xpi/recommendation_test.go index f743a4ca3..c2f79bc33 100644 --- a/signer/xpi/recommendation_test.go +++ b/signer/xpi/recommendation_test.go @@ -187,7 +187,7 @@ func TestMakeRecommendationFile(t *testing.T) { t.Parallel() // initialize a signer - s, err := New(recTestCase, nil) + s, err := New(recTestCase) if err != nil { t.Fatalf("testcase signer initialization failed with: %v", err) } @@ -220,7 +220,7 @@ func TestMakeRecommendationFile(t *testing.T) { t.Parallel() // initialize a signer - s, err := New(validSignerConfigs[0], nil) + s, err := New(validSignerConfigs[0]) if err != nil { t.Fatalf("testcase %d signer initialization failed with: %v", 0, err) } @@ -248,7 +248,7 @@ func TestMakeRecommendationFile(t *testing.T) { } // initialize a signer - s, err := New(dupRecTestCase, nil) + s, err := New(dupRecTestCase) if err != nil { t.Fatalf("testcase signer initialization failed with: %v", err) } @@ -272,7 +272,7 @@ func TestMakeRecommendationFile(t *testing.T) { t.Parallel() // initialize a signer - s, err := New(recTestCase, nil) + s, err := New(recTestCase) if err != nil { t.Fatalf("testcase signer initialization failed with: %v", err) } @@ -350,7 +350,7 @@ func TestRecommendationNotIncludedInOtherSignerModes(t *testing.T) { t.Parallel() // initialize a signer - s, err := New(tc, nil) + s, err := New(tc) if err != nil { t.Fatalf("testcase %d signer initialization failed with: %v", i, err) } @@ -388,7 +388,7 @@ func 
TestSignFileWithRecommendation(t *testing.T) { t.Run("signs unsignedbootstrap with PK7", func(t *testing.T) { input := unsignedBootstrap - s, err := New(recTestCase, nil) + s, err := New(recTestCase) if err != nil { t.Fatalf("signer initialization failed with: %v", err) } @@ -420,7 +420,7 @@ func TestSignFileWithRecommendation(t *testing.T) { t.Fatalf("failed to add issuer cert to pool") } - s, err := New(recTestCase, nil) + s, err := New(recTestCase) if err != nil { t.Fatalf("signer initialization failed with: %v", err) } @@ -446,7 +446,7 @@ func TestSignFileWithRecommendation(t *testing.T) { t.Run("signs unsignedbootstrap with PK7 fails for disallowed rec. state", func(t *testing.T) { input := unsignedBootstrap - s, err := New(recTestCase, nil) + s, err := New(recTestCase) if err != nil { t.Fatalf("signer initialization failed with: %v", err) } @@ -463,7 +463,7 @@ func TestSignFileWithRecommendation(t *testing.T) { t.Run("signs unsigned with rec PK7 and overwrites existing rec file", func(t *testing.T) { input := unsignedBootstrap - s, err := New(recTestCase, nil) + s, err := New(recTestCase) if err != nil { t.Fatalf("signer initialization failed with: %v", err) } diff --git a/signer/xpi/x509.go b/signer/xpi/x509.go index f88c40044..a6255f8c4 100644 --- a/signer/xpi/x509.go +++ b/signer/xpi/x509.go @@ -13,8 +13,16 @@ import ( "time" "go.mozilla.org/cose" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" ) +var signerRequestsHistogram = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "signer_histogram", + Help: "A histogram for signer request timing", +}, []string{"keyid", "type", "mode", "name"}) + func (s *XPISigner) getRsaKey(size int) (*rsa.PrivateKey, error) { var ( err error @@ -24,9 +32,15 @@ func (s *XPISigner) getRsaKey(size int) (*rsa.PrivateKey, error) { start = time.Now() key, err = rsa.GenerateKey(s.rand, size) - if s.stats != nil { - s.stats.SendHistogram("xpi.rsa_cache.get_key", time.Since(start)) - } + // send a histogram with the given name, time.Duration value + // converted to ms, cast to float64, tags for the signer + signerRequestsHistogram.With(prometheus.Labels{ + "keyid": s.KeyID, + "type": s.Type, + "mode": s.Mode, + "name": "xpi.rsa_cache.get_key", + }).Observe(float64(time.Since(start).Milliseconds())) + return key, err } diff --git a/signer/xpi/x509_test.go b/signer/xpi/x509_test.go index 84749b709..a6411fe4b 100644 --- a/signer/xpi/x509_test.go +++ b/signer/xpi/x509_test.go @@ -16,7 +16,7 @@ func TestMakeEndEntity(t *testing.T) { // returns an initialized XPI signer initSigner := func(t *testing.T, testcaseid int) *XPISigner { testcase := validSignerConfigs[testcaseid] - s, err := New(testcase, nil) + s, err := New(testcase) if err != nil { t.Fatalf("signer initialization failed with: %v", err) } @@ -116,7 +116,7 @@ func TestGetIssuerRSAKeySize(t *testing.T) { // returns an initialized XPI signer initSigner := func(t *testing.T, testcaseid int) *XPISigner { testcase := validSignerConfigs[testcaseid] - s, err := New(testcase, nil) + s, err := New(testcase) if err != nil { t.Fatalf("signer initialization failed with: %v", err) } @@ -159,7 +159,7 @@ func TestGetIssuerECDSACurve(t *testing.T) { // returns an initialized XPI signer initSigner := func(t *testing.T, testcaseid int) *XPISigner { testcase := validSignerConfigs[testcaseid] - s, err := New(testcase, nil) + s, err := New(testcase) if err != nil { t.Fatalf("signer initialization failed with: %v", err) } diff --git a/signer/xpi/xpi.go 
b/signer/xpi/xpi.go index eea249b73..de19d9ef7 100644 --- a/signer/xpi/xpi.go +++ b/signer/xpi/xpi.go @@ -83,9 +83,6 @@ type XPISigner struct { // rand is the CSPRNG to use from the HSM or crypto/rand rand io.Reader - // stats is the statsd client for reporting metrics - stats *signer.StatsClient - // recommendationAllowedStates is a map of strings the signer // is allowed to set in the recommendations file to true // indicating whether they're allowed or not @@ -111,9 +108,7 @@ type XPISigner struct { } // New initializes an XPI signer using a configuration -func New(conf signer.Configuration, stats *signer.StatsClient) (s *XPISigner, err error) { - // TODO(AUT-160): instead of doing nil checks for stats all over XPISigner, - // we could just check it here once or provide a null object version of it for tests. +func New(conf signer.Configuration) (s *XPISigner, err error) { s = new(XPISigner) if conf.Type != Type { return nil, fmt.Errorf("xpi: invalid type %q, must be %q", conf.Type, Type) @@ -177,7 +172,6 @@ func New(conf signer.Configuration, stats *signer.StatsClient) (s *XPISigner, er return nil, fmt.Errorf("xpi: unknown signer mode %q, must be 'add-on', 'extension', 'system add-on' or 'hotfix'", conf.Mode) } s.Mode = conf.Mode - s.stats = stats if conf.Mode == ModeAddOnWithRecommendation { s.recommendationAllowedStates = conf.RecommendationConfig.AllowedStates diff --git a/signer/xpi/xpi_test.go b/signer/xpi/xpi_test.go index df4344ab5..6eba080de 100644 --- a/signer/xpi/xpi_test.go +++ b/signer/xpi/xpi_test.go @@ -10,7 +10,6 @@ import ( "testing" "time" - "github.com/DataDog/datadog-go/v5/statsd" "github.com/mozilla-services/autograph/signer" "go.mozilla.org/pkcs7" ) @@ -46,12 +45,7 @@ func TestSignFile(t *testing.T) { t.Run(fmt.Sprintf("test sign file %s signer id %s (%d)", input.name, testcase.ID, i), func(t *testing.T) { t.Parallel() - signerStatsClient, err := signer.NewStatsClient(testcase, &statsd.NoOpClient{}) - if err != nil { - t.Fatalf("passing testcase %d: Error constructing signer.StatsdClient: %v", i, err) - } - - s, err := New(testcase, signerStatsClient) + s, err := New(testcase) if err != nil { t.Fatalf("passing testcase %d: signer initialization failed with: %v", i, err) } @@ -96,7 +90,7 @@ func TestSignData(t *testing.T) { t.Parallel() // initialize a signer - s, err := New(testcase, nil) + s, err := New(testcase) if err != nil { t.Fatalf("testcase %d signer initialization failed with: %v", i, err) } @@ -153,7 +147,7 @@ func TestSignDataAndVerifyWithOpenSSL(t *testing.T) { input := []byte("foobarbaz1234abcd") // init a signer - s, err := New(validSignerConfigs[3], nil) + s, err := New(validSignerConfigs[3]) if err != nil { t.Fatalf("failed to initialize signer: %v", err) } @@ -234,7 +228,7 @@ func TestSignDataWithPKCS7VerifiesDigests(t *testing.T) { input := []byte("foobarbaz1234abcd") testcase := validSignerConfigs[3] - s, err := New(testcase, nil) + s, err := New(testcase) if err != nil { t.Fatalf("failed to initialize signer: %v", err) } @@ -268,7 +262,7 @@ func TestNewFailure(t *testing.T) { t.Parallel() for i, testcase := range invalidSignerConfigs { - _, err := New(testcase.cfg, nil) + _, err := New(testcase.cfg) // check for lack of error first, otherwise `err.Error()` will cause a panic if no error // is present. 
if err == nil { @@ -283,7 +277,7 @@ func TestOptionsP7Digest(t *testing.T) { t.Parallel() testcase := validSignerConfigs[3] - s, err := New(testcase, nil) + s, err := New(testcase) if err != nil { t.Fatalf("failed to initialize signer: %v", err) } @@ -319,7 +313,7 @@ func TestNoID(t *testing.T) { input := []byte("foobarbaz1234abcd") // init a signer, don't care which one, taking this one because p256 is fast - s, err := New(validSignerConfigs[3], nil) + s, err := New(validSignerConfigs[3]) if err != nil { t.Fatalf("failed to initialize signer: %v", err) } @@ -337,7 +331,7 @@ func TestBadCOSEAlgsErrs(t *testing.T) { input := []byte("foobarbaz1234abcd") // init a signer, don't care which one, taking this one because p256 is fast - s, err := New(validSignerConfigs[3], nil) + s, err := New(validSignerConfigs[3]) if err != nil { t.Fatalf("failed to initialize signer: %v", err) } @@ -373,7 +367,7 @@ func TestBadPKCS7DigestErrs(t *testing.T) { input := []byte("foobarbaz1234abcd") // init a signer, don't care which one, taking this one because p256 is fast - s, err := New(validSignerConfigs[3], nil) + s, err := New(validSignerConfigs[3]) if err != nil { t.Fatalf("failed to initialize signer: %v", err) } @@ -395,7 +389,7 @@ func TestMarshalUnfinishedSignature(t *testing.T) { input := []byte("foobarbaz1234abcd") // init a signer, don't care which one, taking this one because p256 is fast - s, err := New(validSignerConfigs[3], nil) + s, err := New(validSignerConfigs[3]) if err != nil { t.Fatalf("failed to initialize signer: %v", err) } @@ -418,7 +412,7 @@ func TestMarshalEmptySignature(t *testing.T) { input := []byte("foobarbaz1234abcd") // init a signer, don't care which one, taking this one because p256 is fast - s, err := New(validSignerConfigs[3], nil) + s, err := New(validSignerConfigs[3]) if err != nil { t.Fatalf("failed to initialize signer: %v", err) } @@ -463,7 +457,7 @@ func TestVerifyUnfinishedSignature(t *testing.T) { input := []byte("foobarbaz1234abcd") // init a signer, don't care which one, taking this one because p256 is fast - s, err := New(validSignerConfigs[3], nil) + s, err := New(validSignerConfigs[3]) if err != nil { t.Fatalf("failed to initialize signer: %v", err) } @@ -487,7 +481,7 @@ func TestSignFileWithCOSESignatures(t *testing.T) { input := unsignedBootstrap // initialize a signer testcase := validSignerConfigs[0] - s, err := New(testcase, nil) + s, err := New(testcase) if err != nil { t.Fatalf("signer initialization failed with: %v", err) } diff --git a/stats.go b/stats.go index 25f1c6b31..533cf2e92 100644 --- a/stats.go +++ b/stats.go @@ -5,115 +5,101 @@ import ( "net/http" "sync/atomic" - "github.com/DataDog/datadog-go/v5/statsd" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - log "github.com/sirupsen/logrus" ) +const statsNamespace = "autograph" + var ( - // TODO(AUT-393): remove the statsd and prometheus counter using this name - // once we're done testing. 
-	foobarTestCounterName = "foobar_test"
-	foobarTestCounter     = promauto.NewCounter(prometheus.CounterOpts{
-		Name: foobarTestCounterName,
-		Help: "A counter used for testing how prometheus and statsd metrics differ",
-	})
-	promOnlyFoobarTestCounterName = promauto.NewCounter(prometheus.CounterOpts{
-		Name: "prom_only_foobar_test",
-		Help: "A counter used for testing how prometheus and statsd metrics differ",
-	})
+	requestCounter = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name:      "requests",
+		Namespace: statsNamespace,
+		Help:      "A counter for how many requests are made to a given handler",
+	}, []string{"handler"})
 	signerRequestsCounter = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "signer_requests",
-		Help: "A counter for how many authenticated and authorized requests are made to a given signer",
+		Name:      "signer_requests",
+		Namespace: statsNamespace,
+		Help:      "A counter for how many authenticated and authorized requests are made to a given signer",
 	}, []string{"keyid", "user", "used_default_signer"})
-)
-
-func loadStatsd(conf configuration) (*statsd.Client, error) {
-	statsdClient, err := statsd.New(conf.Statsd.Addr, statsd.WithNamespace(conf.Statsd.Namespace))
-	if err != nil {
-		return nil, fmt.Errorf("error constructing statsdClient: %w", err)
-	}
-	return statsdClient, nil
-}
-
-func (a *autographer) addStats(conf configuration) error {
-	if conf.Statsd.Addr == "" {
-		// a.stats is set to a safe value in newAutographer, so we leave it
-		// alone and return.
-		log.Infof("Statsd left disabled as no `statsd.addr` was provided in config")
-		return nil
-	}
-
-	stats, err := loadStatsd(conf)
-	if err != nil {
-		return err
-	}
-	a.stats = stats
-	log.Infof("Statsd enabled at %s with namespace %s", conf.Statsd.Addr, conf.Statsd.Namespace)
-	return nil
-}
+	signerRequestsTiming = promauto.NewSummaryVec(prometheus.SummaryOpts{
+		Name:      "signer_request_timing",
+		Namespace: statsNamespace,
+		Help:      "A summary vector for request timing",
+	}, []string{"step"})
+
+	responseStatusCounter = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name:      "response_status",
+		Namespace: statsNamespace,
+		Help:      "A counter for response status codes for a given handler",
+	}, []string{"handler", "statusCode"})
+
+	responseSuccessCounter = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name:      "response_success",
+		Namespace: statsNamespace,
+		Help:      "A counter for successful vs failed response status codes",
+	}, []string{"handler", "status"})
+)
 
-// newStatsdWriter returns a new http.ResponseWriter that sends HTTP response
-// statuses as metrics to statsd. The metric emitted is the given metricPrefix
-// suffixed with ".response.status.". The whole metric for
-// "myhandler" will be something like "myhandler.response.status.200". The
-// returned http.ResponseWriter doesn't support the http.Flusher or
-// http.Hijacker type.
-func newStatsdWriter(w http.ResponseWriter, metricPrefix string, stats statsd.ClientInterface) *statsdWriter {
-	return &statsdWriter{ResponseWriter: w, metricPrefix: metricPrefix, stats: stats, headerWritten: new(atomic.Bool)}
+// newStatsWriter returns a new http.ResponseWriter that sends HTTP response
+// statuses as metrics to prometheus. The response status and success counters
+// are incremented with the handler name and status code as labels. The returned
+// http.ResponseWriter doesn't support the http.Flusher or http.Hijacker type.
+func newStatsWriter(w http.ResponseWriter, handlerName string) *statsWriter {
+	return &statsWriter{ResponseWriter: w, handlerName: handlerName, headerWritten: new(atomic.Bool)}
 }
 
-var _ http.ResponseWriter = &statsdWriter{}
+var _ http.ResponseWriter = &statsWriter{}
 
-type statsdWriter struct {
+type statsWriter struct {
 	http.ResponseWriter
 
-	metricPrefix string
-	stats        statsd.ClientInterface
-
+	handlerName   string
 	headerWritten *atomic.Bool
 }
 
-func (w *statsdWriter) Write(b []byte) (int, error) {
+func (w *statsWriter) Write(b []byte) (int, error) {
 	if !w.headerWritten.Load() {
 		w.WriteHeader(http.StatusOK)
 	}
 	return w.ResponseWriter.Write(b)
 }
 
-func (w *statsdWriter) WriteHeader(statusCode int) {
+func (w *statsWriter) WriteHeader(statusCode int) {
 	if w.headerWritten.CompareAndSwap(false, true) {
-		switch {
-		case statusCode >= 200 && statusCode < 300:
-			w.stats.Incr(fmt.Sprintf("%s.response.status.2xx", w.metricPrefix), nil, 1)
-			w.stats.Incr(fmt.Sprintf("%s.response.success", w.metricPrefix), nil, 1)
-		case statusCode >= 300 && statusCode < 400:
-			w.stats.Incr(fmt.Sprintf("%s.response.status.3xx", w.metricPrefix), nil, 1)
-		case statusCode >= 400 && statusCode < 500:
-			w.stats.Incr(fmt.Sprintf("%s.response.status.4xx", w.metricPrefix), nil, 1)
-			// 4xx is a success code for availability since this is
-			// generally folks messing up their authentication. Still want
-			// to have these on a dashboard as a double check, though.
-			w.stats.Incr(fmt.Sprintf("%s.response.success", w.metricPrefix), nil, 1)
-		case statusCode >= 500 && statusCode < 600:
-			w.stats.Incr(fmt.Sprintf("%s.response.status.5xx", w.metricPrefix), nil, 1)
+		responseStatusCounter.With(prometheus.Labels{
+			"handler":    w.handlerName,
+			"statusCode": fmt.Sprintf("%d", statusCode),
+		}).Inc()
+
+		// 4xx still counts as a success for availability purposes since it is
+		// generally folks messing up their authentication. We still want these
+		// on a dashboard as a double check, though.
+		if statusCode >= 200 && statusCode < 300 || statusCode >= 400 && statusCode < 500 {
+			responseSuccessCounter.With(prometheus.Labels{
+				"handler": w.handlerName,
+				"status":  "success",
+			}).Inc()
+		} else {
+			responseSuccessCounter.With(prometheus.Labels{
+				"handler": w.handlerName,
+				"status":  "failure",
+			}).Inc()
 		}
-		w.stats.Incr(fmt.Sprintf("%s.response.status.%d", w.metricPrefix, statusCode), nil, 1)
+
 		w.ResponseWriter.WriteHeader(statusCode)
 	}
 }
 
-// statsMiddleware is an HTTP handler for emitting a statsd metric of request
+// statsMiddleware is an HTTP handler for emitting a metric of request
 // attempts and returns an http.ResponseWriter for recording HTTP response
-// status codes with newStatsdWriter. It also emits a metric for how many
-// requests it has received (before attemping to process those requests) called
-// ".request.attempts".
+// status codes with newStatsWriter. It also increments the requests counter,
+// labeled with the handler name, before attempting to process each request.
-func statsMiddleware(h http.HandlerFunc, handlerName string, stats statsd.ClientInterface) http.HandlerFunc {
+func statsMiddleware(h http.HandlerFunc, handlerName string) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
-		stats.Incr(handlerName+".request.attempts", nil, 1)
-		w = newStatsdWriter(w, handlerName, stats)
+		requestCounter.With(prometheus.Labels{
+			"handler": handlerName,
+		}).Inc()
+		w = newStatsWriter(w, handlerName)
 		h(w, r)
 	}
 }
@@ -127,7 +113,7 @@ func statsMiddleware(h http.HandlerFunc, handlerName string, stats statsd.Client
 // allow us to sum over the individual http.api.* API request metrics. So, we do
 // the aggregation ourselves. The "agg" is short for "aggregated". The
 // handlerName provided should still include "http.api".
-func apiStatsMiddleware(h http.HandlerFunc, handlerName string, stats statsd.ClientInterface) http.HandlerFunc {
-	handlerFunc := statsMiddleware(h, handlerName, stats)
-	return statsMiddleware(handlerFunc, "agg.http.api", stats)
+func apiStatsMiddleware(h http.HandlerFunc, handlerName string) http.HandlerFunc {
+	handlerFunc := statsMiddleware(h, handlerName)
+	return statsMiddleware(handlerFunc, "agg.http.api")
 }
diff --git a/stats_test.go b/stats_test.go
index 7098718fc..90fd997f3 100644
--- a/stats_test.go
+++ b/stats_test.go
@@ -5,22 +5,18 @@ import (
 	"net/http/httptest"
 	"testing"
 
-	// We generated our own, but the latest DataDog statsd v5 package has their own
-	// in a `mocks` package there.
-	"github.com/mozilla-services/autograph/internal/mockstatsd"
+	"github.com/prometheus/client_golang/prometheus/testutil"
 	"go.uber.org/mock/gomock"
 )
 
 func TestStatsResponseWriterWritesResponseMetricOnce(t *testing.T) {
+	responseSuccessCounter.Reset()
+	responseStatusCounter.Reset()
 	ctrl := gomock.NewController(t)
 	defer ctrl.Finish()
-	mockStats := mockstatsd.NewMockClientInterface(ctrl)
-	mockStats.EXPECT().Incr("myhandler.response.status.4xx", []string(nil), 1.0).Times(1)
-	mockStats.EXPECT().Incr("myhandler.response.success", []string(nil), 1.0).Times(1)
-	mockStats.EXPECT().Incr("myhandler.response.status.400", []string(nil), 1.0).Times(1)
 	recorder := httptest.NewRecorder()
-	statsWriter := newStatsdWriter(recorder, "myhandler", mockStats)
+	statsWriter := newStatsWriter(recorder, "myhandler")
 	statsWriter.WriteHeader(http.StatusBadRequest)
 	if recorder.Code != http.StatusBadRequest {
 		t.Fatalf("expected status code %d, got %d", http.StatusBadRequest, recorder.Code)
@@ -30,40 +26,67 @@ func TestStatsResponseWriterWritesResponseMetricOnce(t *testing.T) {
 	if recorder.Code != http.StatusBadRequest {
 		t.Fatalf("tried to write to the headers again: Expected status code %d, got %d", http.StatusBadRequest, recorder.Code)
 	}
+
+	if testutil.ToFloat64(responseSuccessCounter.WithLabelValues("myhandler", "success")) != float64(1) {
+		t.Fatalf("Expected responseSuccessCounter to be 1, got %f", testutil.ToFloat64(responseSuccessCounter.WithLabelValues("myhandler", "success")))
+	}
+
+	if testutil.ToFloat64(responseStatusCounter.WithLabelValues("myhandler", "400")) != float64(1) {
+		t.Fatalf("Expected responseStatusCounter to be 1, got %f", testutil.ToFloat64(responseStatusCounter.WithLabelValues("myhandler", "400")))
+	}
 }
 
 func TestStatsResponseWriterWritesToHeaderOnWrite(t *testing.T) {
+	responseSuccessCounter.Reset()
+	responseStatusCounter.Reset()
 	ctrl := gomock.NewController(t)
 	defer ctrl.Finish()
-	mockStats := mockstatsd.NewMockClientInterface(ctrl)
-	mockStats.EXPECT().Incr("myhandler.response.status.2xx", []string(nil), 1.0).Times(1)
-	mockStats.EXPECT().Incr("myhandler.response.success", []string(nil), 1.0).Times(1)
-	mockStats.EXPECT().Incr("myhandler.response.status.200", []string(nil), 1.0).Times(1)
 	recorder := httptest.NewRecorder()
-	statsWriter := newStatsdWriter(recorder, "myhandler", mockStats)
+	statsWriter := newStatsWriter(recorder, "myhandler")
 	statsWriter.Write([]byte("hello"))
 	if recorder.Code != http.StatusOK {
 		t.Fatalf("expected status code %d, got %d", http.StatusOK, recorder.Code)
 	}
+
+	if testutil.ToFloat64(responseSuccessCounter.WithLabelValues("myhandler", "success")) != float64(1) {
+		t.Fatalf("Expected responseSuccessCounter to be 1, got %f", testutil.ToFloat64(responseSuccessCounter.WithLabelValues("myhandler", "success")))
+	}
+
+	if testutil.ToFloat64(responseStatusCounter.WithLabelValues("myhandler", "200")) != float64(1) {
+		t.Fatalf("Expected responseStatusCounter to be 1, got %f", testutil.ToFloat64(responseStatusCounter.WithLabelValues("myhandler", "200")))
+	}
 }
 
 func TestWrappingStatsResponseWriteWritesAllMetrics(t *testing.T) {
+	responseSuccessCounter.Reset()
+	responseStatusCounter.Reset()
 	ctrl := gomock.NewController(t)
 	defer ctrl.Finish()
-	mockStats := mockstatsd.NewMockClientInterface(ctrl)
-	mockStats.EXPECT().Incr("inner.response.status.5xx", []string(nil), 1.0).Times(1)
-	mockStats.EXPECT().Incr("inner.response.status.500", []string(nil), 1.0).Times(1)
-	mockStats.EXPECT().Incr("wrapper.response.status.5xx", []string(nil), 1.0).Times(1)
-	mockStats.EXPECT().Incr("wrapper.response.status.500", []string(nil), 1.0).Times(1)
 	recorder := httptest.NewRecorder()
-	inner := newStatsdWriter(recorder, "inner", mockStats)
-	wrapper := newStatsdWriter(inner, "wrapper", mockStats)
+	inner := newStatsWriter(recorder, "inner")
+	wrapper := newStatsWriter(inner, "wrapper")
 	wrapper.WriteHeader(http.StatusInternalServerError)
 	if recorder.Code != http.StatusInternalServerError {
 		t.Fatalf("expected status code %d, got %d", http.StatusInternalServerError, recorder.Code)
 	}
+
+	if testutil.ToFloat64(responseSuccessCounter.WithLabelValues("wrapper", "failure")) != float64(1) {
+		t.Fatalf("Expected responseSuccessCounter to be 1, got %f", testutil.ToFloat64(responseSuccessCounter.WithLabelValues("wrapper", "failure")))
+	}
+
+	if testutil.ToFloat64(responseStatusCounter.WithLabelValues("wrapper", "500")) != float64(1) {
+		t.Fatalf("Expected responseStatusCounter to be 1, got %f", testutil.ToFloat64(responseStatusCounter.WithLabelValues("wrapper", "500")))
+	}
+
+	if testutil.ToFloat64(responseSuccessCounter.WithLabelValues("inner", "failure")) != float64(1) {
+		t.Fatalf("Expected responseSuccessCounter to be 1, got %f", testutil.ToFloat64(responseSuccessCounter.WithLabelValues("inner", "failure")))
+	}
+
+	if testutil.ToFloat64(responseStatusCounter.WithLabelValues("inner", "500")) != float64(1) {
+		t.Fatalf("Expected responseStatusCounter to be 1, got %f", testutil.ToFloat64(responseStatusCounter.WithLabelValues("inner", "500")))
+	}
 }
diff --git a/vendor/github.com/DataDog/datadog-go/v5/LICENSE.txt b/vendor/github.com/DataDog/datadog-go/v5/LICENSE.txt deleted file mode 100644 index 97cd06d7f..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Datadog, Inc - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/README.md b/vendor/github.com/DataDog/datadog-go/v5/statsd/README.md deleted file mode 100644 index 2fc899687..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/README.md +++ /dev/null @@ -1,4 +0,0 @@ -## Overview - -Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags -and histograms. diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/aggregator.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/aggregator.go deleted file mode 100644 index 33eb930ae..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/aggregator.go +++ /dev/null @@ -1,298 +0,0 @@ -package statsd - -import ( - "strings" - "sync" - "sync/atomic" - "time" -) - -type ( - countsMap map[string]*countMetric - gaugesMap map[string]*gaugeMetric - setsMap map[string]*setMetric - bufferedMetricMap map[string]*bufferedMetric -) - -type aggregator struct { - nbContextGauge uint64 - nbContextCount uint64 - nbContextSet uint64 - - countsM sync.RWMutex - gaugesM sync.RWMutex - setsM sync.RWMutex - - gauges gaugesMap - counts countsMap - sets setsMap - histograms bufferedMetricContexts - distributions bufferedMetricContexts - timings bufferedMetricContexts - - closed chan struct{} - - client *Client - - // aggregator implements channelMode mechanism to receive histograms, - // distributions and timings. Since they need sampling they need to - // lock for random. When using both channelMode and ExtendedAggregation - // we don't want goroutine to fight over the lock. 
- inputMetrics chan metric - stopChannelMode chan struct{} - wg sync.WaitGroup -} - -func newAggregator(c *Client, maxSamplesPerContext int64) *aggregator { - return &aggregator{ - client: c, - counts: countsMap{}, - gauges: gaugesMap{}, - sets: setsMap{}, - histograms: newBufferedContexts(newHistogramMetric, maxSamplesPerContext), - distributions: newBufferedContexts(newDistributionMetric, maxSamplesPerContext), - timings: newBufferedContexts(newTimingMetric, maxSamplesPerContext), - closed: make(chan struct{}), - stopChannelMode: make(chan struct{}), - } -} - -func (a *aggregator) start(flushInterval time.Duration) { - ticker := time.NewTicker(flushInterval) - - go func() { - for { - select { - case <-ticker.C: - a.flush() - case <-a.closed: - ticker.Stop() - return - } - } - }() -} - -func (a *aggregator) startReceivingMetric(bufferSize int, nbWorkers int) { - a.inputMetrics = make(chan metric, bufferSize) - for i := 0; i < nbWorkers; i++ { - a.wg.Add(1) - go a.pullMetric() - } -} - -func (a *aggregator) stopReceivingMetric() { - close(a.stopChannelMode) - a.wg.Wait() -} - -func (a *aggregator) stop() { - a.closed <- struct{}{} -} - -func (a *aggregator) pullMetric() { - for { - select { - case m := <-a.inputMetrics: - switch m.metricType { - case histogram: - a.histogram(m.name, m.fvalue, m.tags, m.rate) - case distribution: - a.distribution(m.name, m.fvalue, m.tags, m.rate) - case timing: - a.timing(m.name, m.fvalue, m.tags, m.rate) - } - case <-a.stopChannelMode: - a.wg.Done() - return - } - } -} - -func (a *aggregator) flush() { - for _, m := range a.flushMetrics() { - a.client.sendBlocking(m) - } -} - -func (a *aggregator) flushTelemetryMetrics(t *Telemetry) { - if a == nil { - // aggregation is disabled - return - } - - t.AggregationNbContextGauge = atomic.LoadUint64(&a.nbContextGauge) - t.AggregationNbContextCount = atomic.LoadUint64(&a.nbContextCount) - t.AggregationNbContextSet = atomic.LoadUint64(&a.nbContextSet) - t.AggregationNbContextHistogram = a.histograms.getNbContext() - t.AggregationNbContextDistribution = a.distributions.getNbContext() - t.AggregationNbContextTiming = a.timings.getNbContext() -} - -func (a *aggregator) flushMetrics() []metric { - metrics := []metric{} - - // We reset the values to avoid sending 'zero' values for metrics not - // sampled during this flush interval - - a.setsM.Lock() - sets := a.sets - a.sets = setsMap{} - a.setsM.Unlock() - - for _, s := range sets { - metrics = append(metrics, s.flushUnsafe()...) - } - - a.gaugesM.Lock() - gauges := a.gauges - a.gauges = gaugesMap{} - a.gaugesM.Unlock() - - for _, g := range gauges { - metrics = append(metrics, g.flushUnsafe()) - } - - a.countsM.Lock() - counts := a.counts - a.counts = countsMap{} - a.countsM.Unlock() - - for _, c := range counts { - metrics = append(metrics, c.flushUnsafe()) - } - - metrics = a.histograms.flush(metrics) - metrics = a.distributions.flush(metrics) - metrics = a.timings.flush(metrics) - - atomic.AddUint64(&a.nbContextCount, uint64(len(counts))) - atomic.AddUint64(&a.nbContextGauge, uint64(len(gauges))) - atomic.AddUint64(&a.nbContextSet, uint64(len(sets))) - return metrics -} - -// getContext returns the context for a metric name and tags. -// -// The context is the metric name and tags separated by a separator symbol. 
-// It is not intended to be used as a metric name but as a unique key to aggregate -func getContext(name string, tags []string) string { - c, _ := getContextAndTags(name, tags) - return c -} - -// getContextAndTags returns the context and tags for a metric name and tags. -// -// See getContext for usage for context -// The tags are the tags separated by a separator symbol and can be re-used to pass down to the writer -func getContextAndTags(name string, tags []string) (string, string) { - if len(tags) == 0 { - return name, "" - } - n := len(name) + len(nameSeparatorSymbol) + len(tagSeparatorSymbol)*(len(tags)-1) - for _, s := range tags { - n += len(s) - } - - var sb strings.Builder - sb.Grow(n) - sb.WriteString(name) - sb.WriteString(nameSeparatorSymbol) - sb.WriteString(tags[0]) - for _, s := range tags[1:] { - sb.WriteString(tagSeparatorSymbol) - sb.WriteString(s) - } - - s := sb.String() - - return s, s[len(name)+len(nameSeparatorSymbol):] -} - -func (a *aggregator) count(name string, value int64, tags []string) error { - context := getContext(name, tags) - a.countsM.RLock() - if count, found := a.counts[context]; found { - count.sample(value) - a.countsM.RUnlock() - return nil - } - a.countsM.RUnlock() - - a.countsM.Lock() - // Check if another goroutines hasn't created the value betwen the RUnlock and 'Lock' - if count, found := a.counts[context]; found { - count.sample(value) - a.countsM.Unlock() - return nil - } - - a.counts[context] = newCountMetric(name, value, tags) - a.countsM.Unlock() - return nil -} - -func (a *aggregator) gauge(name string, value float64, tags []string) error { - context := getContext(name, tags) - a.gaugesM.RLock() - if gauge, found := a.gauges[context]; found { - gauge.sample(value) - a.gaugesM.RUnlock() - return nil - } - a.gaugesM.RUnlock() - - gauge := newGaugeMetric(name, value, tags) - - a.gaugesM.Lock() - // Check if another goroutines hasn't created the value betwen the 'RUnlock' and 'Lock' - if gauge, found := a.gauges[context]; found { - gauge.sample(value) - a.gaugesM.Unlock() - return nil - } - a.gauges[context] = gauge - a.gaugesM.Unlock() - return nil -} - -func (a *aggregator) set(name string, value string, tags []string) error { - context := getContext(name, tags) - a.setsM.RLock() - if set, found := a.sets[context]; found { - set.sample(value) - a.setsM.RUnlock() - return nil - } - a.setsM.RUnlock() - - a.setsM.Lock() - // Check if another goroutines hasn't created the value betwen the 'RUnlock' and 'Lock' - if set, found := a.sets[context]; found { - set.sample(value) - a.setsM.Unlock() - return nil - } - a.sets[context] = newSetMetric(name, value, tags) - a.setsM.Unlock() - return nil -} - -// Only histograms, distributions and timings are sampled with a rate since we -// only pack them in on message instead of aggregating them. Discarding the -// sample rate will have impacts on the CPU and memory usage of the Agent. 
- -// type alias for Client.sendToAggregator -type bufferedMetricSampleFunc func(name string, value float64, tags []string, rate float64) error - -func (a *aggregator) histogram(name string, value float64, tags []string, rate float64) error { - return a.histograms.sample(name, value, tags, rate) -} - -func (a *aggregator) distribution(name string, value float64, tags []string, rate float64) error { - return a.distributions.sample(name, value, tags, rate) -} - -func (a *aggregator) timing(name string, value float64, tags []string, rate float64) error { - return a.timings.sample(name, value, tags, rate) -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer.go deleted file mode 100644 index 91f2e32b9..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer.go +++ /dev/null @@ -1,198 +0,0 @@ -package statsd - -import ( - "strconv" -) - -// MessageTooLongError is an error returned when a sample, event or service check is too large once serialized. See -// WithMaxBytesPerPayload option for more details. -type MessageTooLongError struct{} - -func (e MessageTooLongError) Error() string { - return "message too long. See 'WithMaxBytesPerPayload' documentation." -} - -var errBufferFull = MessageTooLongError{} - -type partialWriteError string - -func (e partialWriteError) Error() string { return string(e) } - -const errPartialWrite = partialWriteError("value partially written") - -const metricOverhead = 512 - -// statsdBuffer is a buffer containing statsd messages -// this struct methods are NOT safe for concurrent use -type statsdBuffer struct { - buffer []byte - maxSize int - maxElements int - elementCount int -} - -func newStatsdBuffer(maxSize, maxElements int) *statsdBuffer { - return &statsdBuffer{ - buffer: make([]byte, 0, maxSize+metricOverhead), // pre-allocate the needed size + metricOverhead to avoid having Go re-allocate on it's own if an element does not fit - maxSize: maxSize, - maxElements: maxElements, - } -} - -func (b *statsdBuffer) writeGauge(namespace string, globalTags []string, name string, value float64, tags []string, rate float64, timestamp int64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.buffer = appendGauge(b.buffer, namespace, globalTags, name, value, tags, rate) - b.buffer = appendTimestamp(b.buffer, timestamp) - b.writeSeparator() - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeCount(namespace string, globalTags []string, name string, value int64, tags []string, rate float64, timestamp int64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.buffer = appendCount(b.buffer, namespace, globalTags, name, value, tags, rate) - b.buffer = appendTimestamp(b.buffer, timestamp) - b.writeSeparator() - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeHistogram(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.buffer = appendHistogram(b.buffer, namespace, globalTags, name, value, tags, rate) - b.writeSeparator() - return b.validateNewElement(originalBuffer) -} - -// writeAggregated serialized as many values as possible in the current buffer and return the position in values where it stopped. 
-func (b *statsdBuffer) writeAggregated(metricSymbol []byte, namespace string, globalTags []string, name string, values []float64, tags string, tagSize int, precision int, rate float64) (int, error) { - if b.elementCount >= b.maxElements { - return 0, errBufferFull - } - - originalBuffer := b.buffer - b.buffer = appendHeader(b.buffer, namespace, name) - - // buffer already full - if len(b.buffer)+tagSize > b.maxSize { - b.buffer = originalBuffer - return 0, errBufferFull - } - - // We add as many value as possible - var position int - for idx, v := range values { - previousBuffer := b.buffer - if idx != 0 { - b.buffer = append(b.buffer, ':') - } - - b.buffer = strconv.AppendFloat(b.buffer, v, 'f', precision, 64) - - // Should we stop serializing and switch to another buffer - if len(b.buffer)+tagSize > b.maxSize { - b.buffer = previousBuffer - break - } - position = idx + 1 - } - - // we could not add a single value - if position == 0 { - b.buffer = originalBuffer - return 0, errBufferFull - } - - b.buffer = append(b.buffer, '|') - b.buffer = append(b.buffer, metricSymbol...) - b.buffer = appendRate(b.buffer, rate) - b.buffer = appendTagsAggregated(b.buffer, globalTags, tags) - b.buffer = appendContainerID(b.buffer) - b.writeSeparator() - b.elementCount++ - - if position != len(values) { - return position, errPartialWrite - } - return position, nil - -} - -func (b *statsdBuffer) writeDistribution(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.buffer = appendDistribution(b.buffer, namespace, globalTags, name, value, tags, rate) - b.writeSeparator() - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeSet(namespace string, globalTags []string, name string, value string, tags []string, rate float64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.buffer = appendSet(b.buffer, namespace, globalTags, name, value, tags, rate) - b.writeSeparator() - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeTiming(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.buffer = appendTiming(b.buffer, namespace, globalTags, name, value, tags, rate) - b.writeSeparator() - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeEvent(event *Event, globalTags []string) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.buffer = appendEvent(b.buffer, event, globalTags) - b.writeSeparator() - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeServiceCheck(serviceCheck *ServiceCheck, globalTags []string) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.buffer = appendServiceCheck(b.buffer, serviceCheck, globalTags) - b.writeSeparator() - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) validateNewElement(originalBuffer []byte) error { - if len(b.buffer) > b.maxSize { - b.buffer = originalBuffer - return errBufferFull - } - b.elementCount++ - return nil -} - -func (b *statsdBuffer) writeSeparator() { - b.buffer = append(b.buffer, '\n') -} - -func (b *statsdBuffer) reset() { - b.buffer = b.buffer[:0] - b.elementCount = 0 
-} - -func (b *statsdBuffer) bytes() []byte { - return b.buffer -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer_pool.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer_pool.go deleted file mode 100644 index 7a3e3c9d2..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer_pool.go +++ /dev/null @@ -1,40 +0,0 @@ -package statsd - -type bufferPool struct { - pool chan *statsdBuffer - bufferMaxSize int - bufferMaxElements int -} - -func newBufferPool(poolSize, bufferMaxSize, bufferMaxElements int) *bufferPool { - p := &bufferPool{ - pool: make(chan *statsdBuffer, poolSize), - bufferMaxSize: bufferMaxSize, - bufferMaxElements: bufferMaxElements, - } - for i := 0; i < poolSize; i++ { - p.addNewBuffer() - } - return p -} - -func (p *bufferPool) addNewBuffer() { - p.pool <- newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements) -} - -func (p *bufferPool) borrowBuffer() *statsdBuffer { - select { - case b := <-p.pool: - return b - default: - return newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements) - } -} - -func (p *bufferPool) returnBuffer(buffer *statsdBuffer) { - buffer.reset() - select { - case p.pool <- buffer: - default: - } -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/buffered_metric_context.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/buffered_metric_context.go deleted file mode 100644 index 94b31fe5b..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/buffered_metric_context.go +++ /dev/null @@ -1,104 +0,0 @@ -package statsd - -import ( - "math/rand" - "sync" - "sync/atomic" - "time" -) - -// bufferedMetricContexts represent the contexts for Histograms, Distributions -// and Timing. Since those 3 metric types behave the same way and are sampled -// with the same type they're represented by the same class. -type bufferedMetricContexts struct { - nbContext uint64 - mutex sync.RWMutex - values bufferedMetricMap - newMetric func(string, float64, string, float64) *bufferedMetric - - // Each bufferedMetricContexts uses its own random source and random - // lock to prevent goroutines from contending for the lock on the - // "math/rand" package-global random source (e.g. calls like - // "rand.Float64()" must acquire a shared lock to get the next - // pseudorandom number). - random *rand.Rand - randomLock sync.Mutex -} - -func newBufferedContexts(newMetric func(string, float64, string, int64, float64) *bufferedMetric, maxSamples int64) bufferedMetricContexts { - return bufferedMetricContexts{ - values: bufferedMetricMap{}, - newMetric: func(name string, value float64, stringTags string, rate float64) *bufferedMetric { - return newMetric(name, value, stringTags, maxSamples, rate) - }, - // Note that calling "time.Now().UnixNano()" repeatedly quickly may return - // very similar values. That's fine for seeding the worker-specific random - // source because we just need an evenly distributed stream of float values. - // Do not use this random source for cryptographic randomness. 
- random: rand.New(rand.NewSource(time.Now().UnixNano())), - } -} - -func (bc *bufferedMetricContexts) flush(metrics []metric) []metric { - bc.mutex.Lock() - values := bc.values - bc.values = bufferedMetricMap{} - bc.mutex.Unlock() - - for _, d := range values { - d.Lock() - metrics = append(metrics, d.flushUnsafe()) - d.Unlock() - } - atomic.AddUint64(&bc.nbContext, uint64(len(values))) - return metrics -} - -func (bc *bufferedMetricContexts) sample(name string, value float64, tags []string, rate float64) error { - keepingSample := shouldSample(rate, bc.random, &bc.randomLock) - - // If we don't keep the sample, return early. If we do keep the sample - // we end up storing the *first* observed sampling rate in the metric. - // This is the *wrong* behavior but it's the one we had before and the alternative would increase lock contention too - // much with the current code. - // TODO: change this behavior in the future, probably by introducing thread-local storage and lockless stuctures. - // If this code is removed, also remove the observed sampling rate in the metric and fix `bufferedMetric.flushUnsafe()` - if !keepingSample { - return nil - } - - context, stringTags := getContextAndTags(name, tags) - var v *bufferedMetric - - bc.mutex.RLock() - v, _ = bc.values[context] - bc.mutex.RUnlock() - - // Create it if it wasn't found - if v == nil { - bc.mutex.Lock() - // It might have been created by another goroutine since last call - v, _ = bc.values[context] - if v == nil { - // If we might keep a sample that we should have skipped, but that should not drastically affect performances. - bc.values[context] = bc.newMetric(name, value, stringTags, rate) - // We added a new value, we need to unlock the mutex and quit - bc.mutex.Unlock() - return nil - } - bc.mutex.Unlock() - } - - // Now we can keep the sample or skip it - if keepingSample { - v.maybeKeepSample(value, bc.random, &bc.randomLock) - } else { - v.skipSample() - } - - return nil -} - -func (bc *bufferedMetricContexts) getNbContext() uint64 { - return atomic.LoadUint64(&bc.nbContext) -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/container.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/container.go deleted file mode 100644 index 20d69ef63..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/container.go +++ /dev/null @@ -1,19 +0,0 @@ -package statsd - -import ( - "sync" -) - -var ( - // containerID holds the container ID. - containerID = "" - - initOnce sync.Once -) - -// getContainerID returns the container ID configured at the client creation -// It can either be auto-discovered with origin detection or provided by the user. -// User-defined container ID is prioritized. -func getContainerID() string { - return containerID -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/container_linux.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/container_linux.go deleted file mode 100644 index 125132349..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/container_linux.go +++ /dev/null @@ -1,219 +0,0 @@ -//go:build linux -// +build linux - -package statsd - -import ( - "bufio" - "fmt" - "io" - "os" - "path" - "regexp" - "strings" - "syscall" -) - -const ( - // cgroupPath is the path to the cgroup file where we can find the container id if one exists. 
- cgroupPath = "/proc/self/cgroup" - - // selfMountinfo is the path to the mountinfo path where we can find the container id in case cgroup namespace is preventing the use of /proc/self/cgroup - selfMountInfoPath = "/proc/self/mountinfo" - - // defaultCgroupMountPath is the default path to the cgroup mount point. - defaultCgroupMountPath = "/sys/fs/cgroup" - - // cgroupV1BaseController is the controller used to identify the container-id for cgroup v1 - cgroupV1BaseController = "memory" - - uuidSource = "[0-9a-f]{8}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{12}" - containerSource = "[0-9a-f]{64}" - taskSource = "[0-9a-f]{32}-\\d+" - - containerdSandboxPrefix = "sandboxes" - - // ContainerRegexpStr defines the regexp used to match container IDs - // ([0-9a-f]{64}) is standard container id used pretty much everywhere - // ([0-9a-f]{32}-\d+) is container id used by AWS ECS - // ([0-9a-f]{8}(-[0-9a-f]{4}){4}$) is container id used by Garden - containerRegexpStr = "([0-9a-f]{64})|([0-9a-f]{32}-\\d+)|([0-9a-f]{8}(-[0-9a-f]{4}){4}$)" - // cIDRegexpStr defines the regexp used to match container IDs in /proc/self/mountinfo - cIDRegexpStr = `.*/([^\s/]+)/(` + containerRegexpStr + `)/[\S]*hostname` - - // From https://github.com/torvalds/linux/blob/5859a2b1991101d6b978f3feb5325dad39421f29/include/linux/proc_ns.h#L41-L49 - // Currently, host namespace inode number are hardcoded, which can be used to detect - // if we're running in host namespace or not (does not work when running in DinD) - hostCgroupNamespaceInode = 0xEFFFFFFB -) - -var ( - // expLine matches a line in the /proc/self/cgroup file. It has a submatch for the last element (path), which contains the container ID. - expLine = regexp.MustCompile(`^\d+:[^:]*:(.+)$`) - - // expContainerID matches contained IDs and sources. Source: https://github.com/Qard/container-info/blob/master/index.js - expContainerID = regexp.MustCompile(fmt.Sprintf(`(%s|%s|%s)(?:.scope)?$`, uuidSource, containerSource, taskSource)) - - cIDMountInfoRegexp = regexp.MustCompile(cIDRegexpStr) - - // initContainerID initializes the container ID. - initContainerID = internalInitContainerID -) - -// parseContainerID finds the first container ID reading from r and returns it. -func parseContainerID(r io.Reader) string { - scn := bufio.NewScanner(r) - for scn.Scan() { - path := expLine.FindStringSubmatch(scn.Text()) - if len(path) != 2 { - // invalid entry, continue - continue - } - if parts := expContainerID.FindStringSubmatch(path[1]); len(parts) == 2 { - return parts[1] - } - } - return "" -} - -// readContainerID attempts to return the container ID from the provided file path or empty on failure. 
-func readContainerID(fpath string) string { - f, err := os.Open(fpath) - if err != nil { - return "" - } - defer f.Close() - return parseContainerID(f) -} - -// Parsing /proc/self/mountinfo is not always reliable in Kubernetes+containerd (at least) -// We're still trying to use it as it may help in some cgroupv2 configurations (Docker, ECS, raw containerd) -func parseMountinfo(r io.Reader) string { - scn := bufio.NewScanner(r) - for scn.Scan() { - line := scn.Text() - allMatches := cIDMountInfoRegexp.FindAllStringSubmatch(line, -1) - if len(allMatches) == 0 { - continue - } - - // We're interest in rightmost match - matches := allMatches[len(allMatches)-1] - if len(matches) > 0 && matches[1] != containerdSandboxPrefix { - return matches[2] - } - } - - return "" -} - -func readMountinfo(path string) string { - f, err := os.Open(path) - if err != nil { - return "" - } - defer f.Close() - return parseMountinfo(f) -} - -func isHostCgroupNamespace() bool { - fi, err := os.Stat("/proc/self/ns/cgroup") - if err != nil { - return false - } - - inode := fi.Sys().(*syscall.Stat_t).Ino - - return inode == hostCgroupNamespaceInode -} - -// parseCgroupNodePath parses /proc/self/cgroup and returns a map of controller to its associated cgroup node path. -func parseCgroupNodePath(r io.Reader) map[string]string { - res := make(map[string]string) - scn := bufio.NewScanner(r) - for scn.Scan() { - line := scn.Text() - tokens := strings.Split(line, ":") - if len(tokens) != 3 { - continue - } - if tokens[1] == cgroupV1BaseController || tokens[1] == "" { - res[tokens[1]] = tokens[2] - } - } - return res -} - -// getCgroupInode returns the cgroup controller inode if it exists otherwise an empty string. -// The inode is prefixed by "in-" and is used by the agent to retrieve the container ID. -// For cgroup v1, we use the memory controller. -func getCgroupInode(cgroupMountPath, procSelfCgroupPath string) string { - // Parse /proc/self/cgroup to retrieve the paths to the memory controller (cgroupv1) and the cgroup node (cgroupv2) - f, err := os.Open(procSelfCgroupPath) - if err != nil { - return "" - } - defer f.Close() - cgroupControllersPaths := parseCgroupNodePath(f) - // Retrieve the cgroup inode from /sys/fs/cgroup+controller+cgroupNodePath - for _, controller := range []string{cgroupV1BaseController, ""} { - cgroupNodePath, ok := cgroupControllersPaths[controller] - if !ok { - continue - } - inode := inodeForPath(path.Join(cgroupMountPath, controller, cgroupNodePath)) - if inode != "" { - return inode - } - } - return "" -} - -// inodeForPath returns the inode for the provided path or empty on failure. -func inodeForPath(path string) string { - fi, err := os.Stat(path) - if err != nil { - return "" - } - stats, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return "" - } - return fmt.Sprintf("in-%d", stats.Ino) -} - -// internalInitContainerID initializes the container ID. -// It can either be provided by the user or read from cgroups. -func internalInitContainerID(userProvidedID string, cgroupFallback, isHostCgroupNs bool) { - initOnce.Do(func() { - readCIDOrInode(userProvidedID, cgroupPath, selfMountInfoPath, defaultCgroupMountPath, cgroupFallback, isHostCgroupNs) - }) -} - -// readCIDOrInode reads the container ID from the user provided ID, cgroups or mountinfo. 
-func readCIDOrInode(userProvidedID, cgroupPath, selfMountInfoPath, defaultCgroupMountPath string, cgroupFallback, isHostCgroupNs bool) { - if userProvidedID != "" { - containerID = userProvidedID - return - } - - if cgroupFallback { - containerID = readContainerID(cgroupPath) - if containerID != "" { - return - } - - containerID = readMountinfo(selfMountInfoPath) - if containerID != "" { - return - } - - // If we're in the host cgroup namespace, the cid should be retrievable in /proc/self/cgroup - // In private cgroup namespace, we can retrieve the cgroup controller inode. - if containerID == "" && isHostCgroupNs { - return - } - - containerID = getCgroupInode(defaultCgroupMountPath, cgroupPath) - } -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/container_stub.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/container_stub.go deleted file mode 100644 index 29ab7f2c9..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/container_stub.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build !linux -// +build !linux - -package statsd - -func isHostCgroupNamespace() bool { - return false -} - -var initContainerID = func(userProvidedID string, _, _ bool) { - initOnce.Do(func() { - if userProvidedID != "" { - containerID = userProvidedID - return - } - }) -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/error_handler.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/error_handler.go deleted file mode 100644 index 007626273..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/error_handler.go +++ /dev/null @@ -1,22 +0,0 @@ -package statsd - -import ( - "log" -) - -func LoggingErrorHandler(err error) { - if e, ok := err.(*ErrorInputChannelFull); ok { - log.Printf( - "Input Queue is full (%d elements): %s %s dropped - %s - increase channel buffer size with `WithChannelModeBufferSize()`", - e.ChannelSize, e.Metric.name, e.Metric.tags, e.Msg, - ) - return - } else if e, ok := err.(*ErrorSenderChannelFull); ok { - log.Printf( - "Sender Queue is full (%d elements): %d metrics dropped - %s - increase sender queue size with `WithSenderQueueSize()`", - e.ChannelSize, e.LostElements, e.Msg, - ) - } else { - log.Printf("Error: %v", err) - } -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/event.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/event.go deleted file mode 100644 index a2ca4faf7..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/event.go +++ /dev/null @@ -1,75 +0,0 @@ -package statsd - -import ( - "fmt" - "time" -) - -// Events support -// EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41 -// The reason why they got exported is so that client code can directly use the types. - -// EventAlertType is the alert type for events -type EventAlertType string - -const ( - // Info is the "info" AlertType for events - Info EventAlertType = "info" - // Error is the "error" AlertType for events - Error EventAlertType = "error" - // Warning is the "warning" AlertType for events - Warning EventAlertType = "warning" - // Success is the "success" AlertType for events - Success EventAlertType = "success" -) - -// EventPriority is the event priority for events -type EventPriority string - -const ( - // Normal is the "normal" Priority for events - Normal EventPriority = "normal" - // Low is the "low" Priority for events - Low EventPriority = "low" -) - -// An Event is an object that can be posted to your DataDog event stream. 
-type Event struct { - // Title of the event. Required. - Title string - // Text is the description of the event. - Text string - // Timestamp is a timestamp for the event. If not provided, the dogstatsd - // server will set this to the current time. - Timestamp time.Time - // Hostname for the event. - Hostname string - // AggregationKey groups this event with others of the same key. - AggregationKey string - // Priority of the event. Can be statsd.Low or statsd.Normal. - Priority EventPriority - // SourceTypeName is a source type for the event. - SourceTypeName string - // AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success. - // If absent, the default value applied by the dogstatsd server is Info. - AlertType EventAlertType - // Tags for the event. - Tags []string -} - -// NewEvent creates a new event with the given title and text. Error checking -// against these values is done at send-time, or upon running e.Check. -func NewEvent(title, text string) *Event { - return &Event{ - Title: title, - Text: text, - } -} - -// Check verifies that an event is valid. -func (e *Event) Check() error { - if len(e.Title) == 0 { - return fmt.Errorf("statsd.Event title is required") - } - return nil -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/fnv1a.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/fnv1a.go deleted file mode 100644 index 03dc8a07c..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/fnv1a.go +++ /dev/null @@ -1,39 +0,0 @@ -package statsd - -const ( - // FNV-1a - offset32 = uint32(2166136261) - prime32 = uint32(16777619) - - // init32 is what 32 bits hash values should be initialized with. - init32 = offset32 -) - -// HashString32 returns the hash of s. -func hashString32(s string) uint32 { - return addString32(init32, s) -} - -// AddString32 adds the hash of s to the precomputed hash value h. -func addString32(h uint32, s string) uint32 { - i := 0 - n := (len(s) / 8) * 8 - - for i != n { - h = (h ^ uint32(s[i])) * prime32 - h = (h ^ uint32(s[i+1])) * prime32 - h = (h ^ uint32(s[i+2])) * prime32 - h = (h ^ uint32(s[i+3])) * prime32 - h = (h ^ uint32(s[i+4])) * prime32 - h = (h ^ uint32(s[i+5])) * prime32 - h = (h ^ uint32(s[i+6])) * prime32 - h = (h ^ uint32(s[i+7])) * prime32 - i += 8 - } - - for _, c := range s[i:] { - h = (h ^ uint32(c)) * prime32 - } - - return h -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/format.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/format.go deleted file mode 100644 index f3ab9231f..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/format.go +++ /dev/null @@ -1,280 +0,0 @@ -package statsd - -import ( - "strconv" - "strings" -) - -var ( - gaugeSymbol = []byte("g") - countSymbol = []byte("c") - histogramSymbol = []byte("h") - distributionSymbol = []byte("d") - setSymbol = []byte("s") - timingSymbol = []byte("ms") - tagSeparatorSymbol = "," - nameSeparatorSymbol = ":" -) - -func appendHeader(buffer []byte, namespace string, name string) []byte { - if namespace != "" { - buffer = append(buffer, namespace...) - } - buffer = append(buffer, name...) - buffer = append(buffer, ':') - return buffer -} - -func appendRate(buffer []byte, rate float64) []byte { - if rate < 1 { - buffer = append(buffer, "|@"...) - buffer = strconv.AppendFloat(buffer, rate, 'f', -1, 64) - } - return buffer -} - -func appendWithoutNewlines(buffer []byte, s string) []byte { - // fastpath for strings without newlines - if strings.IndexByte(s, '\n') == -1 { - return append(buffer, s...) 
- } - - for _, b := range []byte(s) { - if b != '\n' { - buffer = append(buffer, b) - } - } - return buffer -} - -func appendTags(buffer []byte, globalTags []string, tags []string) []byte { - if len(globalTags) == 0 && len(tags) == 0 { - return buffer - } - buffer = append(buffer, "|#"...) - firstTag := true - - for _, tag := range globalTags { - if !firstTag { - buffer = append(buffer, tagSeparatorSymbol...) - } - buffer = appendWithoutNewlines(buffer, tag) - firstTag = false - } - for _, tag := range tags { - if !firstTag { - buffer = append(buffer, tagSeparatorSymbol...) - } - buffer = appendWithoutNewlines(buffer, tag) - firstTag = false - } - return buffer -} - -func appendTagsAggregated(buffer []byte, globalTags []string, tags string) []byte { - if len(globalTags) == 0 && tags == "" { - return buffer - } - - buffer = append(buffer, "|#"...) - firstTag := true - - for _, tag := range globalTags { - if !firstTag { - buffer = append(buffer, tagSeparatorSymbol...) - } - buffer = appendWithoutNewlines(buffer, tag) - firstTag = false - } - if tags != "" { - if !firstTag { - buffer = append(buffer, tagSeparatorSymbol...) - } - buffer = appendWithoutNewlines(buffer, tags) - } - return buffer -} - -func appendFloatMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64, precision int) []byte { - buffer = appendHeader(buffer, namespace, name) - buffer = strconv.AppendFloat(buffer, value, 'f', precision, 64) - buffer = append(buffer, '|') - buffer = append(buffer, typeSymbol...) - buffer = appendRate(buffer, rate) - buffer = appendTags(buffer, globalTags, tags) - buffer = appendContainerID(buffer) - return buffer -} - -func appendIntegerMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte { - buffer = appendHeader(buffer, namespace, name) - buffer = strconv.AppendInt(buffer, value, 10) - buffer = append(buffer, '|') - buffer = append(buffer, typeSymbol...) - buffer = appendRate(buffer, rate) - buffer = appendTags(buffer, globalTags, tags) - buffer = appendContainerID(buffer) - return buffer -} - -func appendStringMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte { - buffer = appendHeader(buffer, namespace, name) - buffer = append(buffer, value...) - buffer = append(buffer, '|') - buffer = append(buffer, typeSymbol...) 
- buffer = appendRate(buffer, rate) - buffer = appendTags(buffer, globalTags, tags) - buffer = appendContainerID(buffer) - return buffer -} - -func appendGauge(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte { - return appendFloatMetric(buffer, gaugeSymbol, namespace, globalTags, name, value, tags, rate, -1) -} - -func appendCount(buffer []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte { - return appendIntegerMetric(buffer, countSymbol, namespace, globalTags, name, value, tags, rate) -} - -func appendHistogram(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte { - return appendFloatMetric(buffer, histogramSymbol, namespace, globalTags, name, value, tags, rate, -1) -} - -func appendDistribution(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte { - return appendFloatMetric(buffer, distributionSymbol, namespace, globalTags, name, value, tags, rate, -1) -} - -func appendSet(buffer []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte { - return appendStringMetric(buffer, setSymbol, namespace, globalTags, name, value, tags, rate) -} - -func appendTiming(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte { - return appendFloatMetric(buffer, timingSymbol, namespace, globalTags, name, value, tags, rate, 6) -} - -func escapedEventTextLen(text string) int { - return len(text) + strings.Count(text, "\n") -} - -func appendEscapedEventText(buffer []byte, text string) []byte { - for _, b := range []byte(text) { - if b != '\n' { - buffer = append(buffer, b) - } else { - buffer = append(buffer, "\\n"...) - } - } - return buffer -} - -func appendEvent(buffer []byte, event *Event, globalTags []string) []byte { - escapedTextLen := escapedEventTextLen(event.Text) - - buffer = append(buffer, "_e{"...) - buffer = strconv.AppendInt(buffer, int64(len(event.Title)), 10) - buffer = append(buffer, tagSeparatorSymbol...) - buffer = strconv.AppendInt(buffer, int64(escapedTextLen), 10) - buffer = append(buffer, "}:"...) - buffer = append(buffer, event.Title...) - buffer = append(buffer, '|') - if escapedTextLen != len(event.Text) { - buffer = appendEscapedEventText(buffer, event.Text) - } else { - buffer = append(buffer, event.Text...) - } - - if !event.Timestamp.IsZero() { - buffer = append(buffer, "|d:"...) - buffer = strconv.AppendInt(buffer, int64(event.Timestamp.Unix()), 10) - } - - if len(event.Hostname) != 0 { - buffer = append(buffer, "|h:"...) - buffer = append(buffer, event.Hostname...) - } - - if len(event.AggregationKey) != 0 { - buffer = append(buffer, "|k:"...) - buffer = append(buffer, event.AggregationKey...) - } - - if len(event.Priority) != 0 { - buffer = append(buffer, "|p:"...) - buffer = append(buffer, event.Priority...) - } - - if len(event.SourceTypeName) != 0 { - buffer = append(buffer, "|s:"...) - buffer = append(buffer, event.SourceTypeName...) - } - - if len(event.AlertType) != 0 { - buffer = append(buffer, "|t:"...) - buffer = append(buffer, string(event.AlertType)...) 
- } - - buffer = appendTags(buffer, globalTags, event.Tags) - buffer = appendContainerID(buffer) - return buffer -} - -func appendEscapedServiceCheckText(buffer []byte, text string) []byte { - for i := 0; i < len(text); i++ { - if text[i] == '\n' { - buffer = append(buffer, "\\n"...) - } else if text[i] == 'm' && i+1 < len(text) && text[i+1] == ':' { - buffer = append(buffer, "m\\:"...) - i++ - } else { - buffer = append(buffer, text[i]) - } - } - return buffer -} - -func appendServiceCheck(buffer []byte, serviceCheck *ServiceCheck, globalTags []string) []byte { - buffer = append(buffer, "_sc|"...) - buffer = append(buffer, serviceCheck.Name...) - buffer = append(buffer, '|') - buffer = strconv.AppendInt(buffer, int64(serviceCheck.Status), 10) - - if !serviceCheck.Timestamp.IsZero() { - buffer = append(buffer, "|d:"...) - buffer = strconv.AppendInt(buffer, int64(serviceCheck.Timestamp.Unix()), 10) - } - - if len(serviceCheck.Hostname) != 0 { - buffer = append(buffer, "|h:"...) - buffer = append(buffer, serviceCheck.Hostname...) - } - - buffer = appendTags(buffer, globalTags, serviceCheck.Tags) - - if len(serviceCheck.Message) != 0 { - buffer = append(buffer, "|m:"...) - buffer = appendEscapedServiceCheckText(buffer, serviceCheck.Message) - } - - buffer = appendContainerID(buffer) - return buffer -} - -func appendSeparator(buffer []byte) []byte { - return append(buffer, '\n') -} - -func appendContainerID(buffer []byte) []byte { - if containerID := getContainerID(); len(containerID) > 0 { - buffer = append(buffer, "|c:"...) - buffer = append(buffer, containerID...) - } - return buffer -} - -func appendTimestamp(buffer []byte, timestamp int64) []byte { - if timestamp > noTimestamp { - buffer = append(buffer, "|T"...) - buffer = strconv.AppendInt(buffer, timestamp, 10) - } - return buffer -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/metrics.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/metrics.go deleted file mode 100644 index 3d243b7a6..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/metrics.go +++ /dev/null @@ -1,268 +0,0 @@ -package statsd - -import ( - "math" - "math/rand" - "sync" - "sync/atomic" -) - -/* -Those are metrics type that can be aggregated on the client side: - - Gauge - - Count - - Set -*/ - -type countMetric struct { - value int64 - name string - tags []string -} - -func newCountMetric(name string, value int64, tags []string) *countMetric { - return &countMetric{ - value: value, - name: name, - tags: copySlice(tags), - } -} - -func (c *countMetric) sample(v int64) { - atomic.AddInt64(&c.value, v) -} - -func (c *countMetric) flushUnsafe() metric { - return metric{ - metricType: count, - name: c.name, - tags: c.tags, - rate: 1, - ivalue: c.value, - } -} - -// Gauge - -type gaugeMetric struct { - value uint64 - name string - tags []string -} - -func newGaugeMetric(name string, value float64, tags []string) *gaugeMetric { - return &gaugeMetric{ - value: math.Float64bits(value), - name: name, - tags: copySlice(tags), - } -} - -func (g *gaugeMetric) sample(v float64) { - atomic.StoreUint64(&g.value, math.Float64bits(v)) -} - -func (g *gaugeMetric) flushUnsafe() metric { - return metric{ - metricType: gauge, - name: g.name, - tags: g.tags, - rate: 1, - fvalue: math.Float64frombits(g.value), - } -} - -// Set - -type setMetric struct { - data map[string]struct{} - name string - tags []string - sync.Mutex -} - -func newSetMetric(name string, value string, tags []string) *setMetric { - set := &setMetric{ - data: map[string]struct{}{}, - name: 
name, - tags: copySlice(tags), - } - set.data[value] = struct{}{} - return set -} - -func (s *setMetric) sample(v string) { - s.Lock() - defer s.Unlock() - s.data[v] = struct{}{} -} - -// Sets are aggregated on the agent side too. We flush the keys so a set from -// multiple application can be correctly aggregated on the agent side. -func (s *setMetric) flushUnsafe() []metric { - if len(s.data) == 0 { - return nil - } - - metrics := make([]metric, len(s.data)) - i := 0 - for value := range s.data { - metrics[i] = metric{ - metricType: set, - name: s.name, - tags: s.tags, - rate: 1, - svalue: value, - } - i++ - } - return metrics -} - -// Histograms, Distributions and Timings - -type bufferedMetric struct { - sync.Mutex - - // Kept samples (after sampling) - data []float64 - // Total stored samples (after sampling) - storedSamples int64 - // Total number of observed samples (before sampling). This is used to keep - // the sampling rate correct. - totalSamples int64 - - name string - // Histograms and Distributions store tags as one string since we need - // to compute its size multiple time when serializing. - tags string - mtype metricType - - // maxSamples is the maximum number of samples we keep in memory - maxSamples int64 - - // The first observed user-specified sample rate. When specified - // it is used because we don't know better. - specifiedRate float64 -} - -func (s *bufferedMetric) sample(v float64) { - s.Lock() - defer s.Unlock() - s.sampleUnsafe(v) -} - -func (s *bufferedMetric) sampleUnsafe(v float64) { - s.data = append(s.data, v) - s.storedSamples++ - // Total samples needs to be incremented though an atomic because it can be accessed without the lock. - atomic.AddInt64(&s.totalSamples, 1) -} - -func (s *bufferedMetric) maybeKeepSample(v float64, rand *rand.Rand, randLock *sync.Mutex) { - s.Lock() - defer s.Unlock() - if s.maxSamples > 0 { - if s.storedSamples >= s.maxSamples { - // We reached the maximum number of samples we can keep in memory, so we randomly - // replace a sample. - randLock.Lock() - i := rand.Int63n(atomic.LoadInt64(&s.totalSamples)) - randLock.Unlock() - if i < s.maxSamples { - s.data[i] = v - } - } else { - s.data[s.storedSamples] = v - s.storedSamples++ - } - s.totalSamples++ - } else { - // This code path appends to the slice since we did not pre-allocate memory in this case. - s.sampleUnsafe(v) - } -} - -func (s *bufferedMetric) skipSample() { - atomic.AddInt64(&s.totalSamples, 1) -} - -func (s *bufferedMetric) flushUnsafe() metric { - totalSamples := atomic.LoadInt64(&s.totalSamples) - var rate float64 - - // If the user had a specified rate send it because we don't know better. 
- // This code should be removed once we can also remove the early return at the top of - // `bufferedMetricContexts.sample` - if s.specifiedRate != 1.0 { - rate = s.specifiedRate - } else { - rate = float64(s.storedSamples) / float64(totalSamples) - } - - return metric{ - metricType: s.mtype, - name: s.name, - stags: s.tags, - rate: rate, - fvalues: s.data[:s.storedSamples], - } -} - -type histogramMetric = bufferedMetric - -func newHistogramMetric(name string, value float64, stringTags string, maxSamples int64, rate float64) *histogramMetric { - return &histogramMetric{ - data: newData(value, maxSamples), - totalSamples: 1, - storedSamples: 1, - name: name, - tags: stringTags, - mtype: histogramAggregated, - maxSamples: maxSamples, - specifiedRate: rate, - } -} - -type distributionMetric = bufferedMetric - -func newDistributionMetric(name string, value float64, stringTags string, maxSamples int64, rate float64) *distributionMetric { - return &distributionMetric{ - data: newData(value, maxSamples), - totalSamples: 1, - storedSamples: 1, - name: name, - tags: stringTags, - mtype: distributionAggregated, - maxSamples: maxSamples, - specifiedRate: rate, - } -} - -type timingMetric = bufferedMetric - -func newTimingMetric(name string, value float64, stringTags string, maxSamples int64, rate float64) *timingMetric { - return &timingMetric{ - data: newData(value, maxSamples), - totalSamples: 1, - storedSamples: 1, - name: name, - tags: stringTags, - mtype: timingAggregated, - maxSamples: maxSamples, - specifiedRate: rate, - } -} - -// newData creates a new slice of float64 with the given capacity. If maxSample -// is less than or equal to 0, it returns a slice with the given value as the -// only element. -func newData(value float64, maxSample int64) []float64 { - if maxSample <= 0 { - return []float64{value} - } else { - data := make([]float64, maxSample) - data[0] = value - return data - } -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/noop.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/noop.go deleted file mode 100644 index 6500cde9a..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/noop.go +++ /dev/null @@ -1,118 +0,0 @@ -package statsd - -import "time" - -// NoOpClient is a statsd client that does nothing. Can be useful in testing -// situations for library users. 
-type NoOpClient struct{} - -// Gauge does nothing and returns nil -func (n *NoOpClient) Gauge(name string, value float64, tags []string, rate float64) error { - return nil -} - -// GaugeWithTimestamp does nothing and returns nil -func (n *NoOpClient) GaugeWithTimestamp(name string, value float64, tags []string, rate float64, timestamp time.Time) error { - return nil -} - -// Count does nothing and returns nil -func (n *NoOpClient) Count(name string, value int64, tags []string, rate float64) error { - return nil -} - -// CountWithTimestamp does nothing and returns nil -func (n *NoOpClient) CountWithTimestamp(name string, value int64, tags []string, rate float64, timestamp time.Time) error { - return nil -} - -// Histogram does nothing and returns nil -func (n *NoOpClient) Histogram(name string, value float64, tags []string, rate float64) error { - return nil -} - -// Distribution does nothing and returns nil -func (n *NoOpClient) Distribution(name string, value float64, tags []string, rate float64) error { - return nil -} - -// Decr does nothing and returns nil -func (n *NoOpClient) Decr(name string, tags []string, rate float64) error { - return nil -} - -// Incr does nothing and returns nil -func (n *NoOpClient) Incr(name string, tags []string, rate float64) error { - return nil -} - -// Set does nothing and returns nil -func (n *NoOpClient) Set(name string, value string, tags []string, rate float64) error { - return nil -} - -// Timing does nothing and returns nil -func (n *NoOpClient) Timing(name string, value time.Duration, tags []string, rate float64) error { - return nil -} - -// TimeInMilliseconds does nothing and returns nil -func (n *NoOpClient) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error { - return nil -} - -// Event does nothing and returns nil -func (n *NoOpClient) Event(e *Event) error { - return nil -} - -// SimpleEvent does nothing and returns nil -func (n *NoOpClient) SimpleEvent(title, text string) error { - return nil -} - -// ServiceCheck does nothing and returns nil -func (n *NoOpClient) ServiceCheck(sc *ServiceCheck) error { - return nil -} - -// SimpleServiceCheck does nothing and returns nil -func (n *NoOpClient) SimpleServiceCheck(name string, status ServiceCheckStatus) error { - return nil -} - -// Close does nothing and returns nil -func (n *NoOpClient) Close() error { - return nil -} - -// Flush does nothing and returns nil -func (n *NoOpClient) Flush() error { - return nil -} - -// IsClosed does nothing and return false -func (n *NoOpClient) IsClosed() bool { - return false -} - -// GetTelemetry does nothing and returns an empty Telemetry -func (n *NoOpClient) GetTelemetry() Telemetry { - return Telemetry{} -} - -// Verify that NoOpClient implements the ClientInterface. -// https://golang.org/doc/faq#guarantee_satisfies_interface -var _ ClientInterface = &NoOpClient{} - -// NoOpClientDirect implements ClientDirectInterface and does nothing. 
-type NoOpClientDirect struct { - NoOpClient -} - -// DistributionSamples does nothing and returns nil -func (n *NoOpClientDirect) DistributionSamples(name string, values []float64, tags []string, rate float64) error { - return nil -} - -var _ ClientDirectInterface = &NoOpClientDirect{} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/options.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/options.go deleted file mode 100644 index e007505a6..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/options.go +++ /dev/null @@ -1,414 +0,0 @@ -package statsd - -import ( - "fmt" - "math" - "strings" - "time" -) - -var ( - defaultNamespace = "" - defaultTags = []string{} - defaultMaxBytesPerPayload = 0 - defaultMaxMessagesPerPayload = math.MaxInt32 - defaultBufferPoolSize = 0 - defaultBufferFlushInterval = 100 * time.Millisecond - defaultWorkerCount = 32 - defaultSenderQueueSize = 0 - defaultWriteTimeout = 100 * time.Millisecond - defaultConnectTimeout = 1000 * time.Millisecond - defaultTelemetry = true - defaultReceivingMode = mutexMode - defaultChannelModeBufferSize = 4096 - defaultAggregationFlushInterval = 2 * time.Second - defaultAggregation = true - defaultExtendedAggregation = false - defaultMaxBufferedSamplesPerContext = -1 - defaultOriginDetection = true - defaultChannelModeErrorsWhenFull = false - defaultErrorHandler = func(error) {} -) - -// Options contains the configuration options for a client. -type Options struct { - namespace string - tags []string - maxBytesPerPayload int - maxMessagesPerPayload int - bufferPoolSize int - bufferFlushInterval time.Duration - workersCount int - senderQueueSize int - writeTimeout time.Duration - connectTimeout time.Duration - telemetry bool - receiveMode receivingMode - channelModeBufferSize int - aggregationFlushInterval time.Duration - aggregation bool - extendedAggregation bool - maxBufferedSamplesPerContext int - telemetryAddr string - originDetection bool - containerID string - channelModeErrorsWhenFull bool - errorHandler ErrorHandler -} - -func resolveOptions(options []Option) (*Options, error) { - o := &Options{ - namespace: defaultNamespace, - tags: defaultTags, - maxBytesPerPayload: defaultMaxBytesPerPayload, - maxMessagesPerPayload: defaultMaxMessagesPerPayload, - bufferPoolSize: defaultBufferPoolSize, - bufferFlushInterval: defaultBufferFlushInterval, - workersCount: defaultWorkerCount, - senderQueueSize: defaultSenderQueueSize, - writeTimeout: defaultWriteTimeout, - connectTimeout: defaultConnectTimeout, - telemetry: defaultTelemetry, - receiveMode: defaultReceivingMode, - channelModeBufferSize: defaultChannelModeBufferSize, - aggregationFlushInterval: defaultAggregationFlushInterval, - aggregation: defaultAggregation, - extendedAggregation: defaultExtendedAggregation, - maxBufferedSamplesPerContext: defaultMaxBufferedSamplesPerContext, - originDetection: defaultOriginDetection, - channelModeErrorsWhenFull: defaultChannelModeErrorsWhenFull, - errorHandler: defaultErrorHandler, - } - - for _, option := range options { - err := option(o) - if err != nil { - return nil, err - } - } - - return o, nil -} - -// Option is a client option. Can return an error if validation fails. -type Option func(*Options) error - -// WithNamespace sets a string to be prepend to all metrics, events and service checks name. -// -// A '.' will automatically be added after the namespace if needed. For example a metrics 'test' with a namespace 'prod' -// will produce a final metric named 'prod.test'. 
-func WithNamespace(namespace string) Option { - return func(o *Options) error { - if strings.HasSuffix(namespace, ".") { - o.namespace = namespace - } else { - o.namespace = namespace + "." - } - return nil - } -} - -// WithTags sets global tags to be applied to every metrics, events and service checks. -func WithTags(tags []string) Option { - return func(o *Options) error { - o.tags = tags - return nil - } -} - -// WithMaxMessagesPerPayload sets the maximum number of metrics, events and/or service checks that a single payload can -// contain. -// -// The default is 'math.MaxInt32' which will most likely let the WithMaxBytesPerPayload option take precedence. This -// option can be set to `1` to create an unbuffered client (each metrics/event/service check will be send in its own -// payload to the agent). -func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option { - return func(o *Options) error { - o.maxMessagesPerPayload = maxMessagesPerPayload - return nil - } -} - -// WithMaxBytesPerPayload sets the maximum number of bytes a single payload can contain. Each sample, even and service -// check must be lower than this value once serialized or an `MessageTooLongError` is returned. -// -// The default value 0 which will set the option to the optimal size for the transport protocol used: 1432 for UDP and -// named pipe and 8192 for UDS. Those values offer the best performances. -// Be careful when changing this option, see -// https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#ensure-proper-packet-sizes. -func WithMaxBytesPerPayload(MaxBytesPerPayload int) Option { - return func(o *Options) error { - o.maxBytesPerPayload = MaxBytesPerPayload - return nil - } -} - -// WithBufferPoolSize sets the size of the pool of buffers used to serialized metrics, events and service_checks. -// -// The default, 0, will set the option to the optimal size for the transport protocol used: 2048 for UDP and named pipe -// and 512 for UDS. -func WithBufferPoolSize(bufferPoolSize int) Option { - return func(o *Options) error { - o.bufferPoolSize = bufferPoolSize - return nil - } -} - -// WithBufferFlushInterval sets the interval after which the current buffer is flushed. -// -// A buffers are used to serialized data, they're flushed either when full (see WithMaxBytesPerPayload) or when it's -// been open for longer than this interval. -// -// With apps sending a high number of metrics/events/service_checks the interval rarely timeout. But with slow sending -// apps increasing this value will reduce the number of payload sent on the wire as more data is serialized in the same -// payload. -// -// Default is 100ms -func WithBufferFlushInterval(bufferFlushInterval time.Duration) Option { - return func(o *Options) error { - o.bufferFlushInterval = bufferFlushInterval - return nil - } -} - -// WithWorkersCount sets the number of workers that will be used to serialized data. -// -// Those workers allow the use of multiple buffers at the same time (see WithBufferPoolSize) to reduce lock contention. -// -// Default is 32. -func WithWorkersCount(workersCount int) Option { - return func(o *Options) error { - if workersCount < 1 { - return fmt.Errorf("workersCount must be a positive integer") - } - o.workersCount = workersCount - return nil - } -} - -// WithSenderQueueSize sets the size of the sender queue in number of buffers. -// -// After data has been serialized in a buffer they're pushed to a queue that the sender will consume and then each one -// ot the agent. 
-// -// The default value 0 will set the option to the optimal size for the transport protocol used: 2048 for UDP and named -// pipe and 512 for UDS. -func WithSenderQueueSize(senderQueueSize int) Option { - return func(o *Options) error { - o.senderQueueSize = senderQueueSize - return nil - } -} - -// WithWriteTimeout sets the timeout for network communication with the Agent, after this interval a payload is -// dropped. This is only used for UDS and named pipes connection. -func WithWriteTimeout(writeTimeout time.Duration) Option { - return func(o *Options) error { - o.writeTimeout = writeTimeout - return nil - } -} - -// WithConnectTimeout sets the timeout for network connection with the Agent, after this interval the connection -// attempt is aborted. This is only used for UDS connection. This will also reset the connection if nothing can be -// written to it for this duration. -func WithConnectTimeout(connectTimeout time.Duration) Option { - return func(o *Options) error { - o.connectTimeout = connectTimeout - return nil - } -} - -// WithChannelMode make the client use channels to receive metrics -// -// This determines how the client receive metrics from the app (for example when calling the `Gauge()` method). -// The client will either drop the metrics if its buffers are full (WithChannelMode option) or block the caller until the -// metric can be handled (WithMutexMode option). By default, the client use mutexes. -// -// WithChannelMode uses a channel (see WithChannelModeBufferSize to configure its size) to receive metrics and drops metrics if -// the channel is full. Sending metrics in this mode is much slower that WithMutexMode (because of the channel), but will not -// block the application. This mode is made for application using statsd directly into the application code instead of -// a separated periodic reporter. The goal is to not slow down the application at the cost of dropping metrics and having a lower max -// throughput. -func WithChannelMode() Option { - return func(o *Options) error { - o.receiveMode = channelMode - return nil - } -} - -// WithMutexMode will use mutex to receive metrics from the app through the API. -// -// This determines how the client receive metrics from the app (for example when calling the `Gauge()` method). -// The client will either drop the metrics if its buffers are full (WithChannelMode option) or block the caller until the -// metric can be handled (WithMutexMode option). By default the client use mutexes. -// -// WithMutexMode uses mutexes to receive metrics which is much faster than channels but can cause some lock contention -// when used with a high number of goroutines sending the same metrics. Mutexes are sharded based on the metrics name -// which limit mutex contention when multiple goroutines send different metrics (see WithWorkersCount). This is the -// default behavior which will produce the best throughput. -func WithMutexMode() Option { - return func(o *Options) error { - o.receiveMode = mutexMode - return nil - } -} - -// WithChannelModeBufferSize sets the size of the channel holding incoming metrics when WithChannelMode is used. -func WithChannelModeBufferSize(bufferSize int) Option { - return func(o *Options) error { - o.channelModeBufferSize = bufferSize - return nil - } -} - -// WithChannelModeErrorsWhenFull makes the client return an error when the channel is full. -// This should be enabled if you want to be notified when the client is dropping metrics. 
You -// will also need to set `WithErrorHandler` to be notified of sender error. This might have -// a small performance impact. -func WithChannelModeErrorsWhenFull() Option { - return func(o *Options) error { - o.channelModeErrorsWhenFull = true - return nil - } -} - -// WithoutChannelModeErrorsWhenFull makes the client not return an error when the channel is full. -func WithoutChannelModeErrorsWhenFull() Option { - return func(o *Options) error { - o.channelModeErrorsWhenFull = false - return nil - } -} - -// WithErrorHandler sets a function that will be called when an error occurs. -func WithErrorHandler(errorHandler ErrorHandler) Option { - return func(o *Options) error { - o.errorHandler = errorHandler - return nil - } -} - -// WithAggregationInterval sets the interval at which aggregated metrics are flushed. See WithClientSideAggregation and -// WithExtendedClientSideAggregation for more. -// -// The default interval is 2s. The interval must divide the Agent reporting period (default=10s) evenly to reduce "aliasing" -// that can cause values to appear irregular/spiky. -// -// For example a 3s aggregation interval will create spikes in the final graph: a application sending a count metric -// that increments at a constant 1000 time per second will appear noisy with an interval of 3s. This is because -// client-side aggregation would report every 3 seconds, while the agent is reporting every 10 seconds. This means in -// each agent bucket, the values are: 9000, 9000, 12000. -func WithAggregationInterval(interval time.Duration) Option { - return func(o *Options) error { - o.aggregationFlushInterval = interval - return nil - } -} - -// WithClientSideAggregation enables client side aggregation for Gauges, Counts and Sets. -func WithClientSideAggregation() Option { - return func(o *Options) error { - o.aggregation = true - return nil - } -} - -// WithoutClientSideAggregation disables client side aggregation. -func WithoutClientSideAggregation() Option { - return func(o *Options) error { - o.aggregation = false - o.extendedAggregation = false - return nil - } -} - -// WithExtendedClientSideAggregation enables client side aggregation for all types. This feature is only compatible with -// Agent's version >=6.25.0 && <7.0.0 or Agent's versions >=7.25.0. -// When enabled, the use of `rate` with distribution is discouraged and `WithMaxSamplesPerContext()` should be used. -// If `rate` is used with different values of `rate` the resulting rate is not guaranteed to be correct. -func WithExtendedClientSideAggregation() Option { - return func(o *Options) error { - o.aggregation = true - o.extendedAggregation = true - return nil - } -} - -// WithMaxSamplesPerContext limits the number of sample for metric types that require multiple samples to be send -// over statsd to the agent, such as distributions or timings. This limits the number of sample per -// context for a distribution to a given number. Gauges and counts will not be affected as a single sample per context -// is sent with client side aggregation. -// - This will enable client side aggregation for all metrics. -// - This feature should be used with `WithExtendedClientSideAggregation` for optimal results. -func WithMaxSamplesPerContext(maxSamplesPerDistribution int) Option { - return func(o *Options) error { - o.aggregation = true - o.maxBufferedSamplesPerContext = maxSamplesPerDistribution - return nil - } -} - -// WithoutTelemetry disables the client telemetry. 
-// -// More on this here: https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#client-side-telemetry -func WithoutTelemetry() Option { - return func(o *Options) error { - o.telemetry = false - return nil - } -} - -// WithTelemetryAddr sets a different address for telemetry metrics. By default the same address as the client is used -// for telemetry. -// -// More on this here: https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#client-side-telemetry -func WithTelemetryAddr(addr string) Option { - return func(o *Options) error { - o.telemetryAddr = addr - return nil - } -} - -// WithoutOriginDetection disables the client origin detection. -// When enabled, the client tries to discover its container ID and sends it to the Agent -// to enrich the metrics with container tags. -// If the container id is not found and the client is running in a private cgroup namespace, the client -// sends the base cgroup controller inode. -// Origin detection can also be disabled by configuring the environment variabe DD_ORIGIN_DETECTION_ENABLED=false -// The client tries to read the container ID by parsing the file /proc/self/cgroup, this is not supported on Windows. -// -// More on this here: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp -func WithoutOriginDetection() Option { - return func(o *Options) error { - o.originDetection = false - return nil - } -} - -// WithOriginDetection enables the client origin detection. -// This feature requires Datadog Agent version >=6.35.0 && <7.0.0 or Agent versions >=7.35.0. -// When enabled, the client tries to discover its container ID and sends it to the Agent -// to enrich the metrics with container tags. -// If the container id is not found and the client is running in a private cgroup namespace, the client -// sends the base cgroup controller inode. -// Origin detection can be disabled by configuring the environment variable DD_ORIGIN_DETECTION_ENABLED=false -// -// More on this here: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp -func WithOriginDetection() Option { - return func(o *Options) error { - o.originDetection = true - return nil - } -} - -// WithContainerID allows passing the container ID, this will be used by the Agent to enrich metrics with container tags. -// This feature requires Datadog Agent version >=6.35.0 && <7.0.0 or Agent versions >=7.35.0. -// When configured, the provided container ID is prioritized over the container ID discovered via Origin Detection. -// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID. 
-func WithContainerID(id string) Option { - return func(o *Options) error { - o.containerID = id - return nil - } -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe.go deleted file mode 100644 index 1188b00f3..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !windows -// +build !windows - -package statsd - -import ( - "errors" - "time" -) - -func newWindowsPipeWriter(pipepath string, writeTimeout time.Duration) (Transport, error) { - return nil, errors.New("Windows Named Pipes are only supported on Windows") -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe_windows.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe_windows.go deleted file mode 100644 index c27434ccf..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe_windows.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build windows -// +build windows - -package statsd - -import ( - "net" - "sync" - "time" - - "github.com/Microsoft/go-winio" -) - -type pipeWriter struct { - mu sync.RWMutex - conn net.Conn - timeout time.Duration - pipepath string -} - -func (p *pipeWriter) Write(data []byte) (n int, err error) { - conn, err := p.ensureConnection() - if err != nil { - return 0, err - } - - p.mu.RLock() - conn.SetWriteDeadline(time.Now().Add(p.timeout)) - p.mu.RUnlock() - - n, err = conn.Write(data) - if err != nil { - if e, ok := err.(net.Error); !ok || !e.Temporary() { - // disconnected; retry again on next attempt - p.mu.Lock() - p.conn = nil - p.mu.Unlock() - } - } - return n, err -} - -func (p *pipeWriter) ensureConnection() (net.Conn, error) { - p.mu.RLock() - conn := p.conn - p.mu.RUnlock() - if conn != nil { - return conn, nil - } - - // looks like we might need to connect - try again with write locking. 
- p.mu.Lock() - defer p.mu.Unlock() - if p.conn != nil { - return p.conn, nil - } - newconn, err := winio.DialPipe(p.pipepath, nil) - if err != nil { - return nil, err - } - p.conn = newconn - return newconn, nil -} - -func (p *pipeWriter) Close() error { - return p.conn.Close() -} - -// GetTransportName returns the name of the transport -func (p *pipeWriter) GetTransportName() string { - return writerWindowsPipe -} - -func newWindowsPipeWriter(pipepath string, writeTimeout time.Duration) (*pipeWriter, error) { - // Defer connection establishment to first write - return &pipeWriter{ - conn: nil, - timeout: writeTimeout, - pipepath: pipepath, - }, nil -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/sender.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/sender.go deleted file mode 100644 index fc80395c3..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/sender.go +++ /dev/null @@ -1,145 +0,0 @@ -package statsd - -import ( - "io" - "sync/atomic" -) - -// senderTelemetry contains telemetry about the health of the sender -type senderTelemetry struct { - totalPayloadsSent uint64 - totalPayloadsDroppedQueueFull uint64 - totalPayloadsDroppedWriter uint64 - totalBytesSent uint64 - totalBytesDroppedQueueFull uint64 - totalBytesDroppedWriter uint64 -} - -type Transport interface { - io.WriteCloser - - // GetTransportName returns the name of the transport - GetTransportName() string -} - -type sender struct { - transport Transport - pool *bufferPool - queue chan *statsdBuffer - telemetry *senderTelemetry - stop chan struct{} - flushSignal chan struct{} - errorHandler ErrorHandler -} - -type ErrorSenderChannelFull struct { - LostElements int - ChannelSize int - Msg string -} - -func (e *ErrorSenderChannelFull) Error() string { - return e.Msg -} - -func newSender(transport Transport, queueSize int, pool *bufferPool, errorHandler ErrorHandler) *sender { - sender := &sender{ - transport: transport, - pool: pool, - queue: make(chan *statsdBuffer, queueSize), - telemetry: &senderTelemetry{}, - stop: make(chan struct{}), - flushSignal: make(chan struct{}), - errorHandler: errorHandler, - } - - go sender.sendLoop() - return sender -} - -func (s *sender) send(buffer *statsdBuffer) { - select { - case s.queue <- buffer: - default: - if s.errorHandler != nil { - err := &ErrorSenderChannelFull{ - LostElements: buffer.elementCount, - ChannelSize: len(s.queue), - Msg: "Sender queue is full", - } - s.errorHandler(err) - } - atomic.AddUint64(&s.telemetry.totalPayloadsDroppedQueueFull, 1) - atomic.AddUint64(&s.telemetry.totalBytesDroppedQueueFull, uint64(len(buffer.bytes()))) - s.pool.returnBuffer(buffer) - } -} - -func (s *sender) write(buffer *statsdBuffer) { - _, err := s.transport.Write(buffer.bytes()) - if err != nil { - atomic.AddUint64(&s.telemetry.totalPayloadsDroppedWriter, 1) - atomic.AddUint64(&s.telemetry.totalBytesDroppedWriter, uint64(len(buffer.bytes()))) - if s.errorHandler != nil { - s.errorHandler(err) - } - } else { - atomic.AddUint64(&s.telemetry.totalPayloadsSent, 1) - atomic.AddUint64(&s.telemetry.totalBytesSent, uint64(len(buffer.bytes()))) - } - s.pool.returnBuffer(buffer) -} - -func (s *sender) flushTelemetryMetrics(t *Telemetry) { - t.TotalPayloadsSent = atomic.LoadUint64(&s.telemetry.totalPayloadsSent) - t.TotalPayloadsDroppedQueueFull = atomic.LoadUint64(&s.telemetry.totalPayloadsDroppedQueueFull) - t.TotalPayloadsDroppedWriter = atomic.LoadUint64(&s.telemetry.totalPayloadsDroppedWriter) - - t.TotalBytesSent = atomic.LoadUint64(&s.telemetry.totalBytesSent) - 
t.TotalBytesDroppedQueueFull = atomic.LoadUint64(&s.telemetry.totalBytesDroppedQueueFull) - t.TotalBytesDroppedWriter = atomic.LoadUint64(&s.telemetry.totalBytesDroppedWriter) -} - -func (s *sender) sendLoop() { - defer close(s.stop) - for { - select { - case buffer := <-s.queue: - s.write(buffer) - case <-s.stop: - return - case <-s.flushSignal: - // At that point we know that the workers are paused (the statsd client - // will pause them before calling sender.flush()). - // So we can fully flush the input queue - s.flushInputQueue() - s.flushSignal <- struct{}{} - } - } -} - -func (s *sender) flushInputQueue() { - for { - select { - case buffer := <-s.queue: - s.write(buffer) - default: - return - } - } -} -func (s *sender) flush() { - s.flushSignal <- struct{}{} - <-s.flushSignal -} - -func (s *sender) close() error { - s.stop <- struct{}{} - <-s.stop - s.flushInputQueue() - return s.transport.Close() -} - -func (s *sender) getTransportName() string { - return s.transport.GetTransportName() -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/service_check.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/service_check.go deleted file mode 100644 index e2850465c..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/service_check.go +++ /dev/null @@ -1,57 +0,0 @@ -package statsd - -import ( - "fmt" - "time" -) - -// ServiceCheckStatus support -type ServiceCheckStatus byte - -const ( - // Ok is the "ok" ServiceCheck status - Ok ServiceCheckStatus = 0 - // Warn is the "warning" ServiceCheck status - Warn ServiceCheckStatus = 1 - // Critical is the "critical" ServiceCheck status - Critical ServiceCheckStatus = 2 - // Unknown is the "unknown" ServiceCheck status - Unknown ServiceCheckStatus = 3 -) - -// A ServiceCheck is an object that contains status of DataDog service check. -type ServiceCheck struct { - // Name of the service check. Required. - Name string - // Status of service check. Required. - Status ServiceCheckStatus - // Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd - // server will set this to the current time. - Timestamp time.Time - // Hostname for the serviceCheck. - Hostname string - // A message describing the current state of the serviceCheck. - Message string - // Tags for the serviceCheck. - Tags []string -} - -// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking -// against these values is done at send-time, or upon running sc.Check. -func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck { - return &ServiceCheck{ - Name: name, - Status: status, - } -} - -// Check verifies that a service check is valid. -func (sc *ServiceCheck) Check() error { - if len(sc.Name) == 0 { - return fmt.Errorf("statsd.ServiceCheck name is required") - } - if byte(sc.Status) < 0 || byte(sc.Status) > 3 { - return fmt.Errorf("statsd.ServiceCheck status has invalid value") - } - return nil -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go deleted file mode 100644 index c0137b523..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go +++ /dev/null @@ -1,907 +0,0 @@ -// Copyright 2013 Ooyala, Inc. - -/* -Package statsd provides a Go dogstatsd client. Dogstatsd extends the popular statsd, -adding tags and histograms and pushing upstream to Datadog. - -Refer to http://docs.datadoghq.com/guides/dogstatsd/ for information about DogStatsD. - -statsd is based on go-statsd-client. 
-*/ -package statsd - -//go:generate mockgen -source=statsd.go -destination=mocks/statsd.go - -import ( - "errors" - "fmt" - "io" - "net/url" - "os" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -/* -OptimalUDPPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes -is optimal for regular networks with an MTU of 1500 so datagrams don't get -fragmented. It's generally recommended not to fragment UDP datagrams as losing -a single fragment will cause the entire datagram to be lost. -*/ -const OptimalUDPPayloadSize = 1432 - -/* -MaxUDPPayloadSize defines the maximum payload size for a UDP datagram. -Its value comes from the calculation: 65535 bytes Max UDP datagram size - -8byte UDP header - 60byte max IP headers -any number greater than that will see frames being cut out. -*/ -const MaxUDPPayloadSize = 65467 - -// DefaultUDPBufferPoolSize is the default size of the buffer pool for UDP clients. -const DefaultUDPBufferPoolSize = 2048 - -// DefaultUDSBufferPoolSize is the default size of the buffer pool for UDS clients. -const DefaultUDSBufferPoolSize = 512 - -/* -DefaultMaxAgentPayloadSize is the default maximum payload size the agent -can receive. This can be adjusted by changing dogstatsd_buffer_size in the -agent configuration file datadog.yaml. This is also used as the optimal payload size -for UDS datagrams. -*/ -const DefaultMaxAgentPayloadSize = 8192 - -/* -UnixAddressPrefix holds the prefix to use to enable Unix Domain Socket -traffic instead of UDP. The type of the socket will be guessed. -*/ -const UnixAddressPrefix = "unix://" - -/* -UnixDatagramAddressPrefix holds the prefix to use to enable Unix Domain Socket -datagram traffic instead of UDP. -*/ -const UnixAddressDatagramPrefix = "unixgram://" - -/* -UnixAddressStreamPrefix holds the prefix to use to enable Unix Domain Socket -stream traffic instead of UDP. -*/ -const UnixAddressStreamPrefix = "unixstream://" - -/* -WindowsPipeAddressPrefix holds the prefix to use to enable Windows Named Pipes -traffic instead of UDP. -*/ -const WindowsPipeAddressPrefix = `\\.\pipe\` - -var ( - AddressPrefixes = []string{UnixAddressPrefix, UnixAddressDatagramPrefix, UnixAddressStreamPrefix, WindowsPipeAddressPrefix} -) - -const ( - agentHostEnvVarName = "DD_AGENT_HOST" - agentPortEnvVarName = "DD_DOGSTATSD_PORT" - agentURLEnvVarName = "DD_DOGSTATSD_URL" - defaultUDPPort = "8125" -) - -const ( - // ddEntityID specifies client-side user-specified entity ID injection. - // This env var can be set to the Pod UID on Kubernetes via the downward API. - // Docs: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp - ddEntityID = "DD_ENTITY_ID" - - // ddEntityIDTag specifies the tag name for the client-side entity ID injection - // The Agent expects this tag to contain a non-prefixed Kubernetes Pod UID. - ddEntityIDTag = "dd.internal.entity_id" - - // originDetectionEnabled specifies the env var to enable/disable sending the container ID field. - originDetectionEnabled = "DD_ORIGIN_DETECTION_ENABLED" -) - -/* -ddEnvTagsMapping is a mapping of each "DD_" prefixed environment variable -to a specific tag name. We use a slice to keep the order and simplify tests. -*/ -var ddEnvTagsMapping = []struct{ envName, tagName string }{ - {ddEntityID, ddEntityIDTag}, // Client-side entity ID injection for container tagging. - {"DD_ENV", "env"}, // The name of the env in which the service runs. - {"DD_SERVICE", "service"}, // The name of the running service. 
- {"DD_VERSION", "version"}, // The current version of the running service. -} - -type metricType int - -const ( - gauge metricType = iota - count - histogram - histogramAggregated - distribution - distributionAggregated - set - timing - timingAggregated - event - serviceCheck -) - -type receivingMode int - -const ( - mutexMode receivingMode = iota - channelMode -) - -const ( - writerNameUDP string = "udp" - writerNameUDS string = "uds" - writerNameUDSStream string = "uds-stream" - writerWindowsPipe string = "pipe" - writerNameCustom string = "custom" -) - -// noTimestamp is used as a value for metric without a given timestamp. -const noTimestamp = int64(0) - -type metric struct { - metricType metricType - namespace string - globalTags []string - name string - fvalue float64 - fvalues []float64 - ivalue int64 - svalue string - evalue *Event - scvalue *ServiceCheck - tags []string - stags string - rate float64 - timestamp int64 -} - -type noClientErr string - -// ErrNoClient is returned if statsd reporting methods are invoked on -// a nil client. -const ErrNoClient = noClientErr("statsd client is nil") - -func (e noClientErr) Error() string { - return string(e) -} - -type invalidTimestampErr string - -// InvalidTimestamp is returned if a provided timestamp is invalid. -const InvalidTimestamp = invalidTimestampErr("invalid timestamp") - -func (e invalidTimestampErr) Error() string { - return string(e) -} - -// ClientInterface is an interface that exposes the common client functions for the -// purpose of being able to provide a no-op client or even mocking. This can aid -// downstream users' with their testing. -type ClientInterface interface { - // Gauge measures the value of a metric at a particular time. - Gauge(name string, value float64, tags []string, rate float64) error - - // GaugeWithTimestamp measures the value of a metric at a given time. - // BETA - Please contact our support team for more information to use this feature: https://www.datadoghq.com/support/ - // The value will bypass any aggregation on the client side and agent side, this is - // useful when sending points in the past. - // - // Minimum Datadog Agent version: 7.40.0 - GaugeWithTimestamp(name string, value float64, tags []string, rate float64, timestamp time.Time) error - - // Count tracks how many times something happened per second. - Count(name string, value int64, tags []string, rate float64) error - - // CountWithTimestamp tracks how many times something happened at the given second. - // BETA - Please contact our support team for more information to use this feature: https://www.datadoghq.com/support/ - // The value will bypass any aggregation on the client side and agent side, this is - // useful when sending points in the past. - // - // Minimum Datadog Agent version: 7.40.0 - CountWithTimestamp(name string, value int64, tags []string, rate float64, timestamp time.Time) error - - // Histogram tracks the statistical distribution of a set of values on each host. - Histogram(name string, value float64, tags []string, rate float64) error - - // Distribution tracks the statistical distribution of a set of values across your infrastructure. - // - // It is recommended to use `WithMaxBufferedMetricsPerContext` to avoid dropping metrics at high throughput, `rate` can - // also be used to limit the load. Both options can *not* be used together. 
- Distribution(name string, value float64, tags []string, rate float64) error - - // Decr is just Count of -1 - Decr(name string, tags []string, rate float64) error - - // Incr is just Count of 1 - Incr(name string, tags []string, rate float64) error - - // Set counts the number of unique elements in a group. - Set(name string, value string, tags []string, rate float64) error - - // Timing sends timing information, it is an alias for TimeInMilliseconds - Timing(name string, value time.Duration, tags []string, rate float64) error - - // TimeInMilliseconds sends timing information in milliseconds. - // It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing) - TimeInMilliseconds(name string, value float64, tags []string, rate float64) error - - // Event sends the provided Event. - Event(e *Event) error - - // SimpleEvent sends an event with the provided title and text. - SimpleEvent(title, text string) error - - // ServiceCheck sends the provided ServiceCheck. - ServiceCheck(sc *ServiceCheck) error - - // SimpleServiceCheck sends an serviceCheck with the provided name and status. - SimpleServiceCheck(name string, status ServiceCheckStatus) error - - // Close the client connection. - Close() error - - // Flush forces a flush of all the queued dogstatsd payloads. - Flush() error - - // IsClosed returns if the client has been closed. - IsClosed() bool - - // GetTelemetry return the telemetry metrics for the client since it started. - GetTelemetry() Telemetry -} - -type ErrorHandler func(error) - -// A Client is a handle for sending messages to dogstatsd. It is safe to -// use one Client from multiple goroutines simultaneously. -type Client struct { - // Sender handles the underlying networking protocol - sender *sender - // namespace to prepend to all statsd calls - namespace string - // tags are global tags to be added to every statsd call - tags []string - flushTime time.Duration - telemetry *statsdTelemetry - telemetryClient *telemetryClient - stop chan struct{} - wg sync.WaitGroup - workers []*worker - closerLock sync.Mutex - workersMode receivingMode - aggregatorMode receivingMode - agg *aggregator - aggExtended *aggregator - options []Option - addrOption string - isClosed bool - errorOnBlockedChannel bool - errorHandler ErrorHandler -} - -// statsdTelemetry contains telemetry metrics about the client -type statsdTelemetry struct { - totalMetricsGauge uint64 - totalMetricsCount uint64 - totalMetricsHistogram uint64 - totalMetricsDistribution uint64 - totalMetricsSet uint64 - totalMetricsTiming uint64 - totalEvents uint64 - totalServiceChecks uint64 - totalDroppedOnReceive uint64 -} - -// Verify that Client implements the ClientInterface. -// https://golang.org/doc/faq#guarantee_satisfies_interface -var _ ClientInterface = &Client{} - -func resolveAddr(addr string) string { - envPort := "" - - if addr == "" { - addr = os.Getenv(agentHostEnvVarName) - envPort = os.Getenv(agentPortEnvVarName) - agentURL, _ := os.LookupEnv(agentURLEnvVarName) - agentURL = parseAgentURL(agentURL) - - // agentURLEnvVarName has priority over agentHostEnvVarName - if agentURL != "" { - return agentURL - } - } - - if addr == "" { - return "" - } - - for _, prefix := range AddressPrefixes { - if strings.HasPrefix(addr, prefix) { - return addr - } - } - // TODO: How does this work for IPv6? 
- if strings.Contains(addr, ":") { - return addr - } - if envPort != "" { - addr = fmt.Sprintf("%s:%s", addr, envPort) - } else { - addr = fmt.Sprintf("%s:%s", addr, defaultUDPPort) - } - return addr -} - -func parseAgentURL(agentURL string) string { - if agentURL != "" { - if strings.HasPrefix(agentURL, WindowsPipeAddressPrefix) { - return agentURL - } - - parsedURL, err := url.Parse(agentURL) - if err != nil { - return "" - } - - if parsedURL.Scheme == "udp" { - if strings.Contains(parsedURL.Host, ":") { - return parsedURL.Host - } - return fmt.Sprintf("%s:%s", parsedURL.Host, defaultUDPPort) - } - - if parsedURL.Scheme == "unix" { - return agentURL - } - } - return "" -} - -func createWriter(addr string, writeTimeout time.Duration, connectTimeout time.Duration) (Transport, string, error) { - if addr == "" { - return nil, "", errors.New("No address passed and autodetection from environment failed") - } - - switch { - case strings.HasPrefix(addr, WindowsPipeAddressPrefix): - w, err := newWindowsPipeWriter(addr, writeTimeout) - return w, writerWindowsPipe, err - case strings.HasPrefix(addr, UnixAddressPrefix): - w, err := newUDSWriter(addr[len(UnixAddressPrefix):], writeTimeout, connectTimeout, "") - return w, writerNameUDS, err - case strings.HasPrefix(addr, UnixAddressDatagramPrefix): - w, err := newUDSWriter(addr[len(UnixAddressDatagramPrefix):], writeTimeout, connectTimeout, "unixgram") - return w, writerNameUDS, err - case strings.HasPrefix(addr, UnixAddressStreamPrefix): - w, err := newUDSWriter(addr[len(UnixAddressStreamPrefix):], writeTimeout, connectTimeout, "unix") - return w, writerNameUDS, err - default: - w, err := newUDPWriter(addr, writeTimeout) - return w, writerNameUDP, err - } -} - -// New returns a pointer to a new Client given an addr in the format "hostname:port" for UDP, -// "unix:///path/to/socket" for UDS or "\\.\pipe\path\to\pipe" for Windows Named Pipes. -func New(addr string, options ...Option) (*Client, error) { - o, err := resolveOptions(options) - if err != nil { - return nil, err - } - - addr = resolveAddr(addr) - w, writerType, err := createWriter(addr, o.writeTimeout, o.connectTimeout) - if err != nil { - return nil, err - } - - client, err := newWithWriter(w, o, writerType) - if err == nil { - client.options = append(client.options, options...) - client.addrOption = addr - } - return client, err -} - -type customWriter struct { - io.WriteCloser -} - -func (w *customWriter) GetTransportName() string { - return writerNameCustom -} - -// NewWithWriter creates a new Client with given writer. Writer is a -// io.WriteCloser -func NewWithWriter(w io.WriteCloser, options ...Option) (*Client, error) { - o, err := resolveOptions(options) - if err != nil { - return nil, err - } - return newWithWriter(&customWriter{w}, o, writerNameCustom) -} - -// CloneWithExtraOptions create a new Client with extra options -func CloneWithExtraOptions(c *Client, options ...Option) (*Client, error) { - if c == nil { - return nil, ErrNoClient - } - - if c.addrOption == "" { - return nil, fmt.Errorf("can't clone client with no addrOption") - } - opt := append(c.options, options...) - return New(c.addrOption, opt...) -} - -func newWithWriter(w Transport, o *Options, writerName string) (*Client, error) { - c := Client{ - namespace: o.namespace, - tags: o.tags, - telemetry: &statsdTelemetry{}, - errorOnBlockedChannel: o.channelModeErrorsWhenFull, - errorHandler: o.errorHandler, - } - - // Inject values of DD_* environment variables as global tags. 
- for _, mapping := range ddEnvTagsMapping { - if value := os.Getenv(mapping.envName); value != "" { - c.tags = append(c.tags, fmt.Sprintf("%s:%s", mapping.tagName, value)) - } - } - - initContainerID(o.containerID, isOriginDetectionEnabled(o), isHostCgroupNamespace()) - isUDS := writerName == writerNameUDS - - if o.maxBytesPerPayload == 0 { - if isUDS { - o.maxBytesPerPayload = DefaultMaxAgentPayloadSize - } else { - o.maxBytesPerPayload = OptimalUDPPayloadSize - } - } - if o.bufferPoolSize == 0 { - if isUDS { - o.bufferPoolSize = DefaultUDSBufferPoolSize - } else { - o.bufferPoolSize = DefaultUDPBufferPoolSize - } - } - if o.senderQueueSize == 0 { - if isUDS { - o.senderQueueSize = DefaultUDSBufferPoolSize - } else { - o.senderQueueSize = DefaultUDPBufferPoolSize - } - } - - bufferPool := newBufferPool(o.bufferPoolSize, o.maxBytesPerPayload, o.maxMessagesPerPayload) - c.sender = newSender(w, o.senderQueueSize, bufferPool, o.errorHandler) - c.aggregatorMode = o.receiveMode - - c.workersMode = o.receiveMode - // channelMode mode at the worker level is not enabled when - // ExtendedAggregation is since the user app will not directly - // use the worker (the aggregator sit between the app and the - // workers). - if o.extendedAggregation { - c.workersMode = mutexMode - } - - if o.aggregation || o.extendedAggregation || o.maxBufferedSamplesPerContext > 0 { - c.agg = newAggregator(&c, int64(o.maxBufferedSamplesPerContext)) - c.agg.start(o.aggregationFlushInterval) - - if o.extendedAggregation { - c.aggExtended = c.agg - - if c.aggregatorMode == channelMode { - c.agg.startReceivingMetric(o.channelModeBufferSize, o.workersCount) - } - } - } - - for i := 0; i < o.workersCount; i++ { - w := newWorker(bufferPool, c.sender) - c.workers = append(c.workers, w) - - if c.workersMode == channelMode { - w.startReceivingMetric(o.channelModeBufferSize) - } - } - - c.flushTime = o.bufferFlushInterval - c.stop = make(chan struct{}, 1) - - c.wg.Add(1) - go func() { - defer c.wg.Done() - c.watch() - }() - - if o.telemetry { - if o.telemetryAddr == "" { - c.telemetryClient = newTelemetryClient(&c, c.agg != nil) - } else { - var err error - c.telemetryClient, err = newTelemetryClientWithCustomAddr(&c, o.telemetryAddr, c.agg != nil, bufferPool, o.writeTimeout, o.connectTimeout) - if err != nil { - return nil, err - } - } - c.telemetryClient.run(&c.wg, c.stop) - } - - return &c, nil -} - -func (c *Client) watch() { - ticker := time.NewTicker(c.flushTime) - - for { - select { - case <-ticker.C: - for _, w := range c.workers { - w.flush() - } - case <-c.stop: - ticker.Stop() - return - } - } -} - -// Flush forces a flush of all the queued dogstatsd payloads This method is -// blocking and will not return until everything is sent through the network. -// In mutexMode, this will also block sampling new data to the client while the -// workers and sender are flushed. -func (c *Client) Flush() error { - if c == nil { - return ErrNoClient - } - if c.agg != nil { - c.agg.flush() - } - for _, w := range c.workers { - w.pause() - defer w.unpause() - w.flushUnsafe() - } - // Now that the worker are pause the sender can flush the queue between - // worker and senders - c.sender.flush() - return nil -} - -// IsClosed returns if the client has been closed. 
-func (c *Client) IsClosed() bool { - c.closerLock.Lock() - defer c.closerLock.Unlock() - return c.isClosed -} - -func (c *Client) flushTelemetryMetrics(t *Telemetry) { - t.TotalMetricsGauge = atomic.LoadUint64(&c.telemetry.totalMetricsGauge) - t.TotalMetricsCount = atomic.LoadUint64(&c.telemetry.totalMetricsCount) - t.TotalMetricsSet = atomic.LoadUint64(&c.telemetry.totalMetricsSet) - t.TotalMetricsHistogram = atomic.LoadUint64(&c.telemetry.totalMetricsHistogram) - t.TotalMetricsDistribution = atomic.LoadUint64(&c.telemetry.totalMetricsDistribution) - t.TotalMetricsTiming = atomic.LoadUint64(&c.telemetry.totalMetricsTiming) - t.TotalEvents = atomic.LoadUint64(&c.telemetry.totalEvents) - t.TotalServiceChecks = atomic.LoadUint64(&c.telemetry.totalServiceChecks) - t.TotalDroppedOnReceive = atomic.LoadUint64(&c.telemetry.totalDroppedOnReceive) -} - -// GetTelemetry return the telemetry metrics for the client since it started. -func (c *Client) GetTelemetry() Telemetry { - return c.telemetryClient.getTelemetry() -} - -// GetTransport return the name of the transport used. -func (c *Client) GetTransport() string { - if c.sender == nil { - return "" - } - return c.sender.getTransportName() -} - -type ErrorInputChannelFull struct { - Metric metric - ChannelSize int - Msg string -} - -func (e ErrorInputChannelFull) Error() string { - return e.Msg -} - -func (c *Client) send(m metric) error { - h := hashString32(m.name) - worker := c.workers[h%uint32(len(c.workers))] - - if c.workersMode == channelMode { - select { - case worker.inputMetrics <- m: - default: - atomic.AddUint64(&c.telemetry.totalDroppedOnReceive, 1) - err := &ErrorInputChannelFull{m, len(worker.inputMetrics), "Worker input channel full"} - if c.errorHandler != nil { - c.errorHandler(err) - } - if c.errorOnBlockedChannel { - return err - } - } - return nil - } - return worker.processMetric(m) -} - -// sendBlocking is used by the aggregator to inject aggregated metrics. -func (c *Client) sendBlocking(m metric) error { - m.globalTags = c.tags - m.namespace = c.namespace - - h := hashString32(m.name) - worker := c.workers[h%uint32(len(c.workers))] - return worker.processMetric(m) -} - -func (c *Client) sendToAggregator(mType metricType, name string, value float64, tags []string, rate float64, f bufferedMetricSampleFunc) error { - if c.aggregatorMode == channelMode { - m := metric{metricType: mType, name: name, fvalue: value, tags: tags, rate: rate} - select { - case c.aggExtended.inputMetrics <- m: - default: - atomic.AddUint64(&c.telemetry.totalDroppedOnReceive, 1) - err := &ErrorInputChannelFull{m, len(c.aggExtended.inputMetrics), "Aggregator input channel full"} - if c.errorHandler != nil { - c.errorHandler(err) - } - if c.errorOnBlockedChannel { - return err - } - } - return nil - } - return f(name, value, tags, rate) -} - -// Gauge measures the value of a metric at a particular time. -func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.telemetry.totalMetricsGauge, 1) - if c.agg != nil { - return c.agg.gauge(name, value, tags) - } - return c.send(metric{metricType: gauge, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) -} - -// GaugeWithTimestamp measures the value of a metric at a given time. 
-// BETA - Please contact our support team for more information to use this feature: https://www.datadoghq.com/support/ -// The value will bypass any aggregation on the client side and agent side, this is -// useful when sending points in the past. -// -// Minimum Datadog Agent version: 7.40.0 -func (c *Client) GaugeWithTimestamp(name string, value float64, tags []string, rate float64, timestamp time.Time) error { - if c == nil { - return ErrNoClient - } - - if timestamp.IsZero() || timestamp.Unix() <= noTimestamp { - return InvalidTimestamp - } - - atomic.AddUint64(&c.telemetry.totalMetricsGauge, 1) - return c.send(metric{metricType: gauge, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace, timestamp: timestamp.Unix()}) -} - -// Count tracks how many times something happened per second. -func (c *Client) Count(name string, value int64, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.telemetry.totalMetricsCount, 1) - if c.agg != nil { - return c.agg.count(name, value, tags) - } - return c.send(metric{metricType: count, name: name, ivalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) -} - -// CountWithTimestamp tracks how many times something happened at the given second. -// BETA - Please contact our support team for more information to use this feature: https://www.datadoghq.com/support/ -// The value will bypass any aggregation on the client side and agent side, this is -// useful when sending points in the past. -// -// Minimum Datadog Agent version: 7.40.0 -func (c *Client) CountWithTimestamp(name string, value int64, tags []string, rate float64, timestamp time.Time) error { - if c == nil { - return ErrNoClient - } - - if timestamp.IsZero() || timestamp.Unix() <= noTimestamp { - return InvalidTimestamp - } - - atomic.AddUint64(&c.telemetry.totalMetricsCount, 1) - return c.send(metric{metricType: count, name: name, ivalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace, timestamp: timestamp.Unix()}) -} - -// Histogram tracks the statistical distribution of a set of values on each host. -func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.telemetry.totalMetricsHistogram, 1) - if c.aggExtended != nil { - return c.sendToAggregator(histogram, name, value, tags, rate, c.aggExtended.histogram) - } - return c.send(metric{metricType: histogram, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) -} - -// Distribution tracks the statistical distribution of a set of values across your infrastructure. 
-func (c *Client) Distribution(name string, value float64, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.telemetry.totalMetricsDistribution, 1) - if c.aggExtended != nil { - return c.sendToAggregator(distribution, name, value, tags, rate, c.aggExtended.distribution) - } - return c.send(metric{metricType: distribution, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) -} - -// Decr is just Count of -1 -func (c *Client) Decr(name string, tags []string, rate float64) error { - return c.Count(name, -1, tags, rate) -} - -// Incr is just Count of 1 -func (c *Client) Incr(name string, tags []string, rate float64) error { - return c.Count(name, 1, tags, rate) -} - -// Set counts the number of unique elements in a group. -func (c *Client) Set(name string, value string, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.telemetry.totalMetricsSet, 1) - if c.agg != nil { - return c.agg.set(name, value, tags) - } - return c.send(metric{metricType: set, name: name, svalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) -} - -// Timing sends timing information, it is an alias for TimeInMilliseconds -func (c *Client) Timing(name string, value time.Duration, tags []string, rate float64) error { - return c.TimeInMilliseconds(name, value.Seconds()*1000, tags, rate) -} - -// TimeInMilliseconds sends timing information in milliseconds. -// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing) -func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.telemetry.totalMetricsTiming, 1) - if c.aggExtended != nil { - return c.sendToAggregator(timing, name, value, tags, rate, c.aggExtended.timing) - } - return c.send(metric{metricType: timing, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) -} - -// Event sends the provided Event. -func (c *Client) Event(e *Event) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.telemetry.totalEvents, 1) - return c.send(metric{metricType: event, evalue: e, rate: 1, globalTags: c.tags, namespace: c.namespace}) -} - -// SimpleEvent sends an event with the provided title and text. -func (c *Client) SimpleEvent(title, text string) error { - e := NewEvent(title, text) - return c.Event(e) -} - -// ServiceCheck sends the provided ServiceCheck. -func (c *Client) ServiceCheck(sc *ServiceCheck) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.telemetry.totalServiceChecks, 1) - return c.send(metric{metricType: serviceCheck, scvalue: sc, rate: 1, globalTags: c.tags, namespace: c.namespace}) -} - -// SimpleServiceCheck sends an serviceCheck with the provided name and status. -func (c *Client) SimpleServiceCheck(name string, status ServiceCheckStatus) error { - sc := NewServiceCheck(name, status) - return c.ServiceCheck(sc) -} - -// Close the client connection. 
-func (c *Client) Close() error { - if c == nil { - return ErrNoClient - } - - // Acquire closer lock to ensure only one thread can close the stop channel - c.closerLock.Lock() - defer c.closerLock.Unlock() - - if c.isClosed { - return nil - } - - // Notify all other threads that they should stop - select { - case <-c.stop: - return nil - default: - } - close(c.stop) - - if c.workersMode == channelMode { - for _, w := range c.workers { - w.stopReceivingMetric() - } - } - - // flush the aggregator first - if c.agg != nil { - if c.aggExtended != nil && c.aggregatorMode == channelMode { - c.agg.stopReceivingMetric() - } - c.agg.stop() - } - - // Wait for the threads to stop - c.wg.Wait() - - c.Flush() - - c.isClosed = true - return c.sender.close() -} - -// isOriginDetectionEnabled returns whether the clients should fill the container field. -// -// Disable origin detection only in one of the following cases: -// - DD_ORIGIN_DETECTION_ENABLED is explicitly set to false -// - o.originDetection is explicitly set to false, which is true by default -func isOriginDetectionEnabled(o *Options) bool { - if !o.originDetection || o.containerID != "" { - return false - } - - envVarValue := os.Getenv(originDetectionEnabled) - if envVarValue == "" { - // DD_ORIGIN_DETECTION_ENABLED is not set - // default to true - return true - } - - enabled, err := strconv.ParseBool(envVarValue) - if err != nil { - // Error due to an unsupported DD_ORIGIN_DETECTION_ENABLED value - // default to true - return true - } - - return enabled -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd_direct.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd_direct.go deleted file mode 100644 index af66517cb..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd_direct.go +++ /dev/null @@ -1,69 +0,0 @@ -package statsd - -import ( - "io" - "strings" - "sync/atomic" -) - -type ClientDirectInterface interface { - DistributionSamples(name string, values []float64, tags []string, rate float64) error -} - -// ClientDirect is an *experimental* statsd client that gives direct access to some dogstatsd features. -// -// It is not recommended to use this client in production. This client might allow you to take advantage of -// new features in the agent before they are released, but it might also break your application. -type ClientDirect struct { - *Client -} - -// NewDirect returns a pointer to a new ClientDirect given an addr in the format "hostname:port" for UDP, -// "unix:///path/to/socket" for UDS or "\\.\pipe\path\to\pipe" for Windows Named Pipes. -func NewDirect(addr string, options ...Option) (*ClientDirect, error) { - client, err := New(addr, options...) - if err != nil { - return nil, err - } - return &ClientDirect{ - client, - }, nil -} - -func NewDirectWithWriter(writer io.WriteCloser, options ...Option) (*ClientDirect, error) { - client, err := NewWithWriter(writer, options...) - if err != nil { - return nil, err - } - return &ClientDirect{ - client, - }, nil -} - -// DistributionSamples is similar to Distribution, but it lets the client deals with the sampling. -// -// The provided `rate` is the sampling rate applied by the client and will *not* be used to apply further -// sampling. This is recommended in high performance cases were the overhead of the statsd library might be -// significant and the sampling is already done by the client. -// -// `WithMaxBufferedMetricsPerContext` is ignored when using this method. 
-func (c *ClientDirect) DistributionSamples(name string, values []float64, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.telemetry.totalMetricsDistribution, uint64(len(values))) - return c.send(metric{ - metricType: distributionAggregated, - name: name, - fvalues: values, - tags: tags, - stags: strings.Join(tags, tagSeparatorSymbol), - rate: rate, - globalTags: c.tags, - namespace: c.namespace, - }) -} - -// Validate that ClientDirect implements ClientDirectInterface and ClientInterface. -var _ ClientDirectInterface = (*ClientDirect)(nil) -var _ ClientInterface = (*ClientDirect)(nil) diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/telemetry.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/telemetry.go deleted file mode 100644 index feda764b5..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/telemetry.go +++ /dev/null @@ -1,307 +0,0 @@ -package statsd - -import ( - "fmt" - "sync" - "time" -) - -/* -telemetryInterval is the interval at which telemetry will be sent by the client. -*/ -const telemetryInterval = 10 * time.Second - -/* -clientTelemetryTag is a tag identifying this specific client. -*/ -var clientTelemetryTag = "client:go" - -/* -clientVersionTelemetryTag is a tag identifying this specific client version. -*/ -var clientVersionTelemetryTag = "client_version:5.4.0" - -// Telemetry represents internal metrics about the client behavior since it started. -type Telemetry struct { - // - // Those are produced by the 'Client' - // - - // TotalMetrics is the total number of metrics sent by the client before aggregation and sampling. - TotalMetrics uint64 - // TotalMetricsGauge is the total number of gauges sent by the client before aggregation and sampling. - TotalMetricsGauge uint64 - // TotalMetricsCount is the total number of counts sent by the client before aggregation and sampling. - TotalMetricsCount uint64 - // TotalMetricsHistogram is the total number of histograms sent by the client before aggregation and sampling. - TotalMetricsHistogram uint64 - // TotalMetricsDistribution is the total number of distributions sent by the client before aggregation and - // sampling. - TotalMetricsDistribution uint64 - // TotalMetricsSet is the total number of sets sent by the client before aggregation and sampling. - TotalMetricsSet uint64 - // TotalMetricsTiming is the total number of timings sent by the client before aggregation and sampling. - TotalMetricsTiming uint64 - // TotalEvents is the total number of events sent by the client before aggregation and sampling. - TotalEvents uint64 - // TotalServiceChecks is the total number of service_checks sent by the client before aggregation and sampling. - TotalServiceChecks uint64 - - // TotalDroppedOnReceive is the total number metrics/event/service_checks dropped when using ChannelMode (see - // WithChannelMode option). - TotalDroppedOnReceive uint64 - - // - // Those are produced by the 'sender' - // - - // TotalPayloadsSent is the total number of payload (packet on the network) succesfully sent by the client. When - // using UDP we don't know if packet dropped or not, so all packet are considered as succesfully sent. - TotalPayloadsSent uint64 - // TotalPayloadsDropped is the total number of payload dropped by the client. This includes all cause of dropped - // (TotalPayloadsDroppedQueueFull and TotalPayloadsDroppedWriter). When using UDP This won't includes the - // network dropped. 
- TotalPayloadsDropped uint64 - // TotalPayloadsDroppedWriter is the total number of payload dropped by the writer (when using UDS or named - // pipe) due to network timeout or error. - TotalPayloadsDroppedWriter uint64 - // TotalPayloadsDroppedQueueFull is the total number of payload dropped internally because the queue of payloads - // waiting to be sent on the wire is full. This means the client is generating more metrics than can be sent on - // the wire. If your app sends metrics in batch look at WithSenderQueueSize option to increase the queue size. - TotalPayloadsDroppedQueueFull uint64 - - // TotalBytesSent is the total number of bytes succesfully sent by the client. When using UDP we don't know if - // packet dropped or not, so all packet are considered as succesfully sent. - TotalBytesSent uint64 - // TotalBytesDropped is the total number of bytes dropped by the client. This includes all cause of dropped - // (TotalBytesDroppedQueueFull and TotalBytesDroppedWriter). When using UDP This - // won't includes the network dropped. - TotalBytesDropped uint64 - // TotalBytesDroppedWriter is the total number of bytes dropped by the writer (when using UDS or named pipe) due - // to network timeout or error. - TotalBytesDroppedWriter uint64 - // TotalBytesDroppedQueueFull is the total number of bytes dropped internally because the queue of payloads - // waiting to be sent on the wire is full. This means the client is generating more metrics than can be sent on - // the wire. If your app sends metrics in batch look at WithSenderQueueSize option to increase the queue size. - TotalBytesDroppedQueueFull uint64 - - // - // Those are produced by the 'aggregator' - // - - // AggregationNbContext is the total number of contexts flushed by the aggregator when either - // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled. - AggregationNbContext uint64 - // AggregationNbContextGauge is the total number of contexts for gauges flushed by the aggregator when either - // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled. - AggregationNbContextGauge uint64 - // AggregationNbContextCount is the total number of contexts for counts flushed by the aggregator when either - // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled. - AggregationNbContextCount uint64 - // AggregationNbContextSet is the total number of contexts for sets flushed by the aggregator when either - // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled. - AggregationNbContextSet uint64 - // AggregationNbContextHistogram is the total number of contexts for histograms flushed by the aggregator when either - // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled. - AggregationNbContextHistogram uint64 - // AggregationNbContextDistribution is the total number of contexts for distributions flushed by the aggregator when either - // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled. - AggregationNbContextDistribution uint64 - // AggregationNbContextTiming is the total number of contexts for timings flushed by the aggregator when either - // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled. - AggregationNbContextTiming uint64 -} - -type telemetryClient struct { - sync.RWMutex // used mostly to change the transport tag. - - c *Client - aggEnabled bool // is aggregation enabled and should we sent aggregation telemetry. 
- transport string - tags []string - tagsByType map[metricType][]string - transportTagKnown bool - sender *sender - worker *worker - lastSample Telemetry // The previous sample of telemetry sent -} - -func newTelemetryClient(c *Client, aggregationEnabled bool) *telemetryClient { - t := &telemetryClient{ - c: c, - aggEnabled: aggregationEnabled, - tags: []string{}, - tagsByType: map[metricType][]string{}, - } - - t.setTags() - return t -} - -func newTelemetryClientWithCustomAddr(c *Client, telemetryAddr string, aggregationEnabled bool, pool *bufferPool, - writeTimeout time.Duration, connectTimeout time.Duration, -) (*telemetryClient, error) { - telemetryAddr = resolveAddr(telemetryAddr) - telemetryWriter, _, err := createWriter(telemetryAddr, writeTimeout, connectTimeout) - if err != nil { - return nil, fmt.Errorf("Could not resolve telemetry address: %v", err) - } - - t := newTelemetryClient(c, aggregationEnabled) - - // Creating a custom sender/worker with 1 worker in mutex mode for the - // telemetry that share the same bufferPool. - // FIXME due to performance pitfall, we're always using UDP defaults - // even for UDS. - t.sender = newSender(telemetryWriter, DefaultUDPBufferPoolSize, pool, c.errorHandler) - t.worker = newWorker(pool, t.sender) - return t, nil -} - -func (t *telemetryClient) run(wg *sync.WaitGroup, stop chan struct{}) { - wg.Add(1) - go func() { - defer wg.Done() - ticker := time.NewTicker(telemetryInterval) - for { - select { - case <-ticker.C: - t.sendTelemetry() - case <-stop: - ticker.Stop() - if t.sender != nil { - t.sender.close() - } - return - } - } - }() -} - -func (t *telemetryClient) sendTelemetry() { - for _, m := range t.flush() { - if t.worker != nil { - t.worker.processMetric(m) - } else { - t.c.send(m) - } - } - - if t.worker != nil { - t.worker.flush() - } -} - -func (t *telemetryClient) getTelemetry() Telemetry { - if t == nil { - // telemetry was disabled through the WithoutTelemetry option - return Telemetry{} - } - - tlm := Telemetry{} - t.c.flushTelemetryMetrics(&tlm) - t.c.sender.flushTelemetryMetrics(&tlm) - t.c.agg.flushTelemetryMetrics(&tlm) - - tlm.TotalMetrics = tlm.TotalMetricsGauge + - tlm.TotalMetricsCount + - tlm.TotalMetricsSet + - tlm.TotalMetricsHistogram + - tlm.TotalMetricsDistribution + - tlm.TotalMetricsTiming - - tlm.TotalPayloadsDropped = tlm.TotalPayloadsDroppedQueueFull + tlm.TotalPayloadsDroppedWriter - tlm.TotalBytesDropped = tlm.TotalBytesDroppedQueueFull + tlm.TotalBytesDroppedWriter - - if t.aggEnabled { - tlm.AggregationNbContext = tlm.AggregationNbContextGauge + - tlm.AggregationNbContextCount + - tlm.AggregationNbContextSet + - tlm.AggregationNbContextHistogram + - tlm.AggregationNbContextDistribution + - tlm.AggregationNbContextTiming - } - return tlm -} - -// setTransportTag if it was never set and is now known. -func (t *telemetryClient) setTags() { - transport := t.c.GetTransport() - t.RLock() - // We need to refresh if we never set the tags or if the transport changed. - // For example when `unix://` is used we might return `uds` until we actually connect and detect that - // this is a UDS Stream socket and then return `uds-stream`. 
- needsRefresh := len(t.tags) == len(t.c.tags) || t.transport != transport - t.RUnlock() - - if !needsRefresh { - return - } - - t.Lock() - defer t.Unlock() - - t.transport = transport - t.tags = append(t.c.tags, clientTelemetryTag, clientVersionTelemetryTag) - if transport != "" { - t.tags = append(t.tags, "client_transport:"+transport) - } - t.tagsByType[gauge] = append(append([]string{}, t.tags...), "metrics_type:gauge") - t.tagsByType[count] = append(append([]string{}, t.tags...), "metrics_type:count") - t.tagsByType[set] = append(append([]string{}, t.tags...), "metrics_type:set") - t.tagsByType[timing] = append(append([]string{}, t.tags...), "metrics_type:timing") - t.tagsByType[histogram] = append(append([]string{}, t.tags...), "metrics_type:histogram") - t.tagsByType[distribution] = append(append([]string{}, t.tags...), "metrics_type:distribution") -} - -// flushTelemetry returns Telemetry metrics to be flushed. It's its own function to ease testing. -func (t *telemetryClient) flush() []metric { - m := []metric{} - - // same as Count but without global namespace - telemetryCount := func(name string, value int64, tags []string) { - m = append(m, metric{metricType: count, name: name, ivalue: value, tags: tags, rate: 1}) - } - - tlm := t.getTelemetry() - t.setTags() - - // We send the diff between now and the previous telemetry flush. This keep the same telemetry behavior from V4 - // so users dashboard's aren't broken when upgrading to V5. It also allow to graph on the same dashboard a mix - // of V4 and V5 apps. - telemetryCount("datadog.dogstatsd.client.metrics", int64(tlm.TotalMetrics-t.lastSample.TotalMetrics), t.tags) - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsGauge-t.lastSample.TotalMetricsGauge), t.tagsByType[gauge]) - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsCount-t.lastSample.TotalMetricsCount), t.tagsByType[count]) - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsHistogram-t.lastSample.TotalMetricsHistogram), t.tagsByType[histogram]) - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsDistribution-t.lastSample.TotalMetricsDistribution), t.tagsByType[distribution]) - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsSet-t.lastSample.TotalMetricsSet), t.tagsByType[set]) - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsTiming-t.lastSample.TotalMetricsTiming), t.tagsByType[timing]) - telemetryCount("datadog.dogstatsd.client.events", int64(tlm.TotalEvents-t.lastSample.TotalEvents), t.tags) - telemetryCount("datadog.dogstatsd.client.service_checks", int64(tlm.TotalServiceChecks-t.lastSample.TotalServiceChecks), t.tags) - - telemetryCount("datadog.dogstatsd.client.metric_dropped_on_receive", int64(tlm.TotalDroppedOnReceive-t.lastSample.TotalDroppedOnReceive), t.tags) - - telemetryCount("datadog.dogstatsd.client.packets_sent", int64(tlm.TotalPayloadsSent-t.lastSample.TotalPayloadsSent), t.tags) - telemetryCount("datadog.dogstatsd.client.packets_dropped", int64(tlm.TotalPayloadsDropped-t.lastSample.TotalPayloadsDropped), t.tags) - telemetryCount("datadog.dogstatsd.client.packets_dropped_queue", int64(tlm.TotalPayloadsDroppedQueueFull-t.lastSample.TotalPayloadsDroppedQueueFull), t.tags) - telemetryCount("datadog.dogstatsd.client.packets_dropped_writer", int64(tlm.TotalPayloadsDroppedWriter-t.lastSample.TotalPayloadsDroppedWriter), t.tags) - - 
telemetryCount("datadog.dogstatsd.client.bytes_dropped", int64(tlm.TotalBytesDropped-t.lastSample.TotalBytesDropped), t.tags) - telemetryCount("datadog.dogstatsd.client.bytes_sent", int64(tlm.TotalBytesSent-t.lastSample.TotalBytesSent), t.tags) - telemetryCount("datadog.dogstatsd.client.bytes_dropped_queue", int64(tlm.TotalBytesDroppedQueueFull-t.lastSample.TotalBytesDroppedQueueFull), t.tags) - telemetryCount("datadog.dogstatsd.client.bytes_dropped_writer", int64(tlm.TotalBytesDroppedWriter-t.lastSample.TotalBytesDroppedWriter), t.tags) - - if t.aggEnabled { - telemetryCount("datadog.dogstatsd.client.aggregated_context", int64(tlm.AggregationNbContext-t.lastSample.AggregationNbContext), t.tags) - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextGauge-t.lastSample.AggregationNbContextGauge), t.tagsByType[gauge]) - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextSet-t.lastSample.AggregationNbContextSet), t.tagsByType[set]) - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextCount-t.lastSample.AggregationNbContextCount), t.tagsByType[count]) - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextHistogram-t.lastSample.AggregationNbContextHistogram), t.tagsByType[histogram]) - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextDistribution-t.lastSample.AggregationNbContextDistribution), t.tagsByType[distribution]) - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextTiming-t.lastSample.AggregationNbContextTiming), t.tagsByType[timing]) - } - - t.lastSample = tlm - - return m -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/udp.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/udp.go deleted file mode 100644 index b90f75279..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/udp.go +++ /dev/null @@ -1,39 +0,0 @@ -package statsd - -import ( - "net" - "time" -) - -// udpWriter is an internal class wrapping around management of UDP connection -type udpWriter struct { - conn net.Conn -} - -// New returns a pointer to a new udpWriter given an addr in the format "hostname:port". 
-func newUDPWriter(addr string, _ time.Duration) (*udpWriter, error) { - udpAddr, err := net.ResolveUDPAddr("udp", addr) - if err != nil { - return nil, err - } - conn, err := net.DialUDP("udp", nil, udpAddr) - if err != nil { - return nil, err - } - writer := &udpWriter{conn: conn} - return writer, nil -} - -// Write data to the UDP connection with no error handling -func (w *udpWriter) Write(data []byte) (int, error) { - return w.conn.Write(data) -} - -func (w *udpWriter) Close() error { - return w.conn.Close() -} - -// GetTransportName returns the transport used by the sender -func (w *udpWriter) GetTransportName() string { - return writerNameUDP -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/uds.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/uds.go deleted file mode 100644 index 09518992a..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/uds.go +++ /dev/null @@ -1,167 +0,0 @@ -//go:build !windows -// +build !windows - -package statsd - -import ( - "encoding/binary" - "net" - "strings" - "sync" - "time" -) - -// udsWriter is an internal class wrapping around management of UDS connection -type udsWriter struct { - // Address to send metrics to, needed to allow reconnection on error - addr string - // Transport used - transport string - // Established connection object, or nil if not connected yet - conn net.Conn - // write timeout - writeTimeout time.Duration - // connect timeout - connectTimeout time.Duration - sync.RWMutex // used to lock conn / writer can replace it -} - -// newUDSWriter returns a pointer to a new udsWriter given a socket file path as addr. -func newUDSWriter(addr string, writeTimeout time.Duration, connectTimeout time.Duration, transport string) (*udsWriter, error) { - // Defer connection to first Write - writer := &udsWriter{addr: addr, transport: transport, conn: nil, writeTimeout: writeTimeout, connectTimeout: connectTimeout} - return writer, nil -} - -// GetTransportName returns the transport used by the writer -func (w *udsWriter) GetTransportName() string { - w.RLock() - defer w.RUnlock() - - if w.transport == "unix" { - return writerNameUDSStream - } else { - return writerNameUDS - } -} - -func (w *udsWriter) shouldCloseConnection(err error, partialWrite bool) bool { - if err != nil && partialWrite { - // We can't recover from a partial write - return true - } - if err, isNetworkErr := err.(net.Error); err != nil && (!isNetworkErr || !err.Timeout()) { - // Statsd server disconnected, retry connecting at next packet - return true - } - return false -} - -// Write data to the UDS connection with write timeout and minimal error handling: -// create the connection if nil, and destroy it if the statsd server has disconnected -func (w *udsWriter) Write(data []byte) (int, error) { - var n int - partialWrite := false - conn, err := w.ensureConnection() - if err != nil { - return 0, err - } - stream := conn.LocalAddr().Network() == "unix" - - // When using streams the deadline will only make us drop the packet if we can't write it at all, - // once we've started writing we need to finish. - conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) - - // When using streams, we append the length of the packet to the data - if stream { - bs := []byte{0, 0, 0, 0} - binary.LittleEndian.PutUint32(bs, uint32(len(data))) - _, err = w.conn.Write(bs) - - partialWrite = true - - // W need to be able to finish to write partially written packets once we have started. 
- // But we will reset the connection if we can't write anything at all for a long time. - w.conn.SetWriteDeadline(time.Now().Add(w.connectTimeout)) - - // Continue writing only if we've written the length of the packet - if err == nil { - n, err = w.conn.Write(data) - if err == nil { - partialWrite = false - } - } - } else { - n, err = w.conn.Write(data) - } - - if w.shouldCloseConnection(err, partialWrite) { - w.unsetConnection() - } - return n, err -} - -func (w *udsWriter) Close() error { - if w.conn != nil { - return w.conn.Close() - } - return nil -} - -func (w *udsWriter) tryToDial(network string) (net.Conn, error) { - udsAddr, err := net.ResolveUnixAddr(network, w.addr) - if err != nil { - return nil, err - } - newConn, err := net.DialTimeout(udsAddr.Network(), udsAddr.String(), w.connectTimeout) - if err != nil { - return nil, err - } - return newConn, nil -} - -func (w *udsWriter) ensureConnection() (net.Conn, error) { - // Check if we've already got a socket we can use - w.RLock() - currentConn := w.conn - w.RUnlock() - - if currentConn != nil { - return currentConn, nil - } - - // Looks like we might need to connect - try again with write locking. - w.Lock() - defer w.Unlock() - if w.conn != nil { - return w.conn, nil - } - - var newConn net.Conn - var err error - - // Try to guess the transport if not specified. - if w.transport == "" { - newConn, err = w.tryToDial("unixgram") - // try to connect with unixgram failed, try again with unix streams. - if err != nil && strings.Contains(err.Error(), "protocol wrong type for socket") { - newConn, err = w.tryToDial("unix") - } - } else { - newConn, err = w.tryToDial(w.transport) - } - - if err != nil { - return nil, err - } - w.conn = newConn - w.transport = newConn.RemoteAddr().Network() - return newConn, nil -} - -func (w *udsWriter) unsetConnection() { - w.Lock() - defer w.Unlock() - _ = w.conn.Close() - w.conn = nil -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/uds_windows.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/uds_windows.go deleted file mode 100644 index 909f5a0a0..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/uds_windows.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build windows -// +build windows - -package statsd - -import ( - "fmt" - "time" -) - -// newUDSWriter is disabled on Windows, SOCK_DGRAM are still unavailable but -// SOCK_STREAM should work once implemented in the agent (https://devblogs.microsoft.com/commandline/af_unix-comes-to-windows/) -func newUDSWriter(_ string, _ time.Duration, _ time.Duration, _ string) (Transport, error) { - return nil, fmt.Errorf("Unix socket is not available on Windows") -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/utils.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/utils.go deleted file mode 100644 index 8c3ac8426..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/utils.go +++ /dev/null @@ -1,32 +0,0 @@ -package statsd - -import ( - "math/rand" - "sync" -) - -func shouldSample(rate float64, r *rand.Rand, lock *sync.Mutex) bool { - if rate >= 1 { - return true - } - // sources created by rand.NewSource() (ie. w.random) are not thread safe. - // TODO: use defer once the lowest Go version we support is 1.14 (defer - // has an overhead before that). 
- lock.Lock() - if r.Float64() > rate { - lock.Unlock() - return false - } - lock.Unlock() - return true -} - -func copySlice(src []string) []string { - if src == nil { - return nil - } - - c := make([]string, len(src)) - copy(c, src) - return c -} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/worker.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/worker.go deleted file mode 100644 index 19dccd339..000000000 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/worker.go +++ /dev/null @@ -1,158 +0,0 @@ -package statsd - -import ( - "math/rand" - "sync" - "time" -) - -type worker struct { - pool *bufferPool - buffer *statsdBuffer - sender *sender - random *rand.Rand - randomLock sync.Mutex - sync.Mutex - - inputMetrics chan metric - stop chan struct{} -} - -func newWorker(pool *bufferPool, sender *sender) *worker { - // Each worker uses its own random source and random lock to prevent - // workers in separate goroutines from contending for the lock on the - // "math/rand" package-global random source (e.g. calls like - // "rand.Float64()" must acquire a shared lock to get the next - // pseudorandom number). - // Note that calling "time.Now().UnixNano()" repeatedly quickly may return - // very similar values. That's fine for seeding the worker-specific random - // source because we just need an evenly distributed stream of float values. - // Do not use this random source for cryptographic randomness. - random := rand.New(rand.NewSource(time.Now().UnixNano())) - return &worker{ - pool: pool, - sender: sender, - buffer: pool.borrowBuffer(), - random: random, - stop: make(chan struct{}), - } -} - -func (w *worker) startReceivingMetric(bufferSize int) { - w.inputMetrics = make(chan metric, bufferSize) - go w.pullMetric() -} - -func (w *worker) stopReceivingMetric() { - w.stop <- struct{}{} -} - -func (w *worker) pullMetric() { - for { - select { - case m := <-w.inputMetrics: - w.processMetric(m) - case <-w.stop: - return - } - } -} - -func (w *worker) processMetric(m metric) error { - // Aggregated metrics are already sampled. - if m.metricType != distributionAggregated && m.metricType != histogramAggregated && m.metricType != timingAggregated { - if !shouldSample(m.rate, w.random, &w.randomLock) { - return nil - } - } - w.Lock() - var err error - if err = w.writeMetricUnsafe(m); err == errBufferFull { - w.flushUnsafe() - err = w.writeMetricUnsafe(m) - } - w.Unlock() - return err -} - -func (w *worker) writeAggregatedMetricUnsafe(m metric, metricSymbol []byte, precision int, rate float64) error { - globalPos := 0 - - // first check how much data we can write to the buffer: - // +3 + len(metricSymbol) because the message will include '||#' before the tags - // +1 for the potential line break at the start of the metric - extraSize := len(m.stags) + 4 + len(metricSymbol) - if m.rate < 1 { - // +2 for "|@" - // + the maximum size of a rate (https://en.wikipedia.org/wiki/IEEE_754-1985) - extraSize += 2 + 18 - } - for _, t := range m.globalTags { - extraSize += len(t) + 1 - } - - for { - pos, err := w.buffer.writeAggregated(metricSymbol, m.namespace, m.globalTags, m.name, m.fvalues[globalPos:], m.stags, extraSize, precision, rate) - if err == errPartialWrite { - // We successfully wrote part of the histogram metrics. - // We flush the current buffer and finish the histogram - // in a new one. 
- w.flushUnsafe() - globalPos += pos - } else { - return err - } - } -} - -func (w *worker) writeMetricUnsafe(m metric) error { - switch m.metricType { - case gauge: - return w.buffer.writeGauge(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate, m.timestamp) - case count: - return w.buffer.writeCount(m.namespace, m.globalTags, m.name, m.ivalue, m.tags, m.rate, m.timestamp) - case histogram: - return w.buffer.writeHistogram(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate) - case distribution: - return w.buffer.writeDistribution(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate) - case set: - return w.buffer.writeSet(m.namespace, m.globalTags, m.name, m.svalue, m.tags, m.rate) - case timing: - return w.buffer.writeTiming(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate) - case event: - return w.buffer.writeEvent(m.evalue, m.globalTags) - case serviceCheck: - return w.buffer.writeServiceCheck(m.scvalue, m.globalTags) - case histogramAggregated: - return w.writeAggregatedMetricUnsafe(m, histogramSymbol, -1, m.rate) - case distributionAggregated: - return w.writeAggregatedMetricUnsafe(m, distributionSymbol, -1, m.rate) - case timingAggregated: - return w.writeAggregatedMetricUnsafe(m, timingSymbol, 6, m.rate) - default: - return nil - } -} - -func (w *worker) flush() { - w.Lock() - w.flushUnsafe() - w.Unlock() -} - -func (w *worker) pause() { - w.Lock() -} - -func (w *worker) unpause() { - w.Unlock() -} - -// flush the current buffer. Lock must be held by caller. -// flushed buffer written to the network asynchronously. -func (w *worker) flushUnsafe() { - if len(w.buffer.bytes()) > 0 { - w.sender.send(w.buffer) - w.buffer = w.pool.borrowBuffer() - } -} diff --git a/vendor/github.com/Microsoft/go-winio/.gitattributes b/vendor/github.com/Microsoft/go-winio/.gitattributes deleted file mode 100644 index 94f480de9..000000000 --- a/vendor/github.com/Microsoft/go-winio/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -* text=auto eol=lf \ No newline at end of file diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore deleted file mode 100644 index 815e20660..000000000 --- a/vendor/github.com/Microsoft/go-winio/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -.vscode/ - -*.exe - -# testing -testdata - -# go workspaces -go.work -go.work.sum diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml deleted file mode 100644 index faedfe937..000000000 --- a/vendor/github.com/Microsoft/go-winio/.golangci.yml +++ /dev/null @@ -1,147 +0,0 @@ -linters: - enable: - # style - - containedctx # struct contains a context - - dupl # duplicate code - - errname # erorrs are named correctly - - nolintlint # "//nolint" directives are properly explained - - revive # golint replacement - - unconvert # unnecessary conversions - - wastedassign - - # bugs, performance, unused, etc ... 
- - contextcheck # function uses a non-inherited context - - errorlint # errors not wrapped for 1.13 - - exhaustive # check exhaustiveness of enum switch statements - - gofmt # files are gofmt'ed - - gosec # security - - nilerr # returns nil even with non-nil error - - thelper # test helpers without t.Helper() - - unparam # unused function params - -issues: - exclude-dirs: - - pkg/etw/sample - - exclude-rules: - # err is very often shadowed in nested scopes - - linters: - - govet - text: '^shadow: declaration of "err" shadows declaration' - - # ignore long lines for skip autogen directives - - linters: - - revive - text: "^line-length-limit: " - source: "^//(go:generate|sys) " - - #TODO: remove after upgrading to go1.18 - # ignore comment spacing for nolint and sys directives - - linters: - - revive - text: "^comment-spacings: no space between comment delimiter and comment text" - source: "//(cspell:|nolint:|sys |todo)" - - # not on go 1.18 yet, so no any - - linters: - - revive - text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'" - - # allow unjustified ignores of error checks in defer statements - - linters: - - nolintlint - text: "^directive `//nolint:errcheck` should provide explanation" - source: '^\s*defer ' - - # allow unjustified ignores of error lints for io.EOF - - linters: - - nolintlint - text: "^directive `//nolint:errorlint` should provide explanation" - source: '[=|!]= io.EOF' - - -linters-settings: - exhaustive: - default-signifies-exhaustive: true - govet: - enable-all: true - disable: - # struct order is often for Win32 compat - # also, ignore pointer bytes/GC issues for now until performance becomes an issue - - fieldalignment - nolintlint: - require-explanation: true - require-specific: true - revive: - # revive is more configurable than static check, so likely the preferred alternative to static-check - # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997) - enable-all-rules: - true - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md - rules: - # rules with required arguments - - name: argument-limit - disabled: true - - name: banned-characters - disabled: true - - name: cognitive-complexity - disabled: true - - name: cyclomatic - disabled: true - - name: file-header - disabled: true - - name: function-length - disabled: true - - name: function-result-limit - disabled: true - - name: max-public-structs - disabled: true - # geneally annoying rules - - name: add-constant # complains about any and all strings and integers - disabled: true - - name: confusing-naming # we frequently use "Foo()" and "foo()" together - disabled: true - - name: flag-parameter # excessive, and a common idiom we use - disabled: true - - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead - disabled: true - # general config - - name: line-length-limit - arguments: - - 140 - - name: var-naming - arguments: - - [] - - - CID - - CRI - - CTRD - - DACL - - DLL - - DOS - - ETW - - FSCTL - - GCS - - GMSA - - HCS - - HV - - IO - - LCOW - - LDAP - - LPAC - - LTSC - - MMIO - - NT - - OCI - - PMEM - - PWSH - - RX - - SACl - - SID - - SMB - - TX - - VHD - - VHDX - - VMID - - VPCI - - WCOW - - WIM diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS deleted file mode 100644 index ae1b4942b..000000000 --- a/vendor/github.com/Microsoft/go-winio/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ - * @microsoft/containerplat diff --git 
a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE deleted file mode 100644 index b8b569d77..000000000 --- a/vendor/github.com/Microsoft/go-winio/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md deleted file mode 100644 index 7474b4f0b..000000000 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) - -This repository contains utilities for efficiently performing Win32 IO operations in -Go. Currently, this is focused on accessing named pipes and other file handles, and -for using named pipes as a net transport. - -This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go -to reuse the thread to schedule another goroutine. This limits support to Windows Vista and -newer operating systems. This is similar to the implementation of network sockets in Go's net -package. - -Please see the LICENSE file for licensing information. - -## Contributing - -This project welcomes contributions and suggestions. -Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that -you have the right to, and actually do, grant us the rights to use your contribution. -For details, visit [Microsoft CLA](https://cla.microsoft.com). - -When you submit a pull request, a CLA-bot will automatically determine whether you need to -provide a CLA and decorate the PR appropriately (e.g., label, comment). -Simply follow the instructions provided by the bot. -You will only need to do this once across all repos using our CLA. - -Additionally, the pull request pipeline requires the following steps to be performed before -mergining. - -### Code Sign-Off - -We require that contributors sign their commits using [`git commit --signoff`][git-commit-s] -to certify they either authored the work themselves or otherwise have permission to use it in this project. - -A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s]. - -Please see [the developer certificate](https://developercertificate.org) for more info, -as well as to make sure that you can attest to the rules listed. 
-Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. - -### Linting - -Code must pass a linting stage, which uses [`golangci-lint`][lint]. -The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run -automatically with VSCode by adding the following to your workspace or folder settings: - -```json - "go.lintTool": "golangci-lint", - "go.lintOnSave": "package", -``` - -Additional editor [integrations options are also available][lint-ide]. - -Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root: - -```shell -# use . or specify a path to only lint a package -# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0" -> golangci-lint run ./... -``` - -### Go Generate - -The pipeline checks that auto-generated code, via `go generate`, are up to date. - -This can be done for the entire repo: - -```shell -> go generate ./... -``` - -## Code of Conduct - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - -## Special Thanks - -Thanks to [natefinch][natefinch] for the inspiration for this library. -See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation. - -[lint]: https://golangci-lint.run/ -[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration -[lint-install]: https://golangci-lint.run/usage/install/#local-installation - -[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s -[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff - -[natefinch]: https://github.com/natefinch diff --git a/vendor/github.com/Microsoft/go-winio/SECURITY.md b/vendor/github.com/Microsoft/go-winio/SECURITY.md deleted file mode 100644 index 869fdfe2b..000000000 --- a/vendor/github.com/Microsoft/go-winio/SECURITY.md +++ /dev/null @@ -1,41 +0,0 @@ - - -## Security - -Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). - -If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. - -## Reporting Security Issues - -**Please do not report security vulnerabilities through public GitHub issues.** - -Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). - -If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). - -You should receive a response within 24 hours. 
If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). - -Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: - - * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) - * Full paths of source file(s) related to the manifestation of the issue - * The location of the affected source code (tag/branch/commit or direct URL) - * Any special configuration required to reproduce the issue - * Step-by-step instructions to reproduce the issue - * Proof-of-concept or exploit code (if possible) - * Impact of the issue, including how an attacker might exploit the issue - -This information will help us triage your report more quickly. - -If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. - -## Preferred Languages - -We prefer all communications to be in English. - -## Policy - -Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). - - diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go deleted file mode 100644 index b54341daa..000000000 --- a/vendor/github.com/Microsoft/go-winio/backup.go +++ /dev/null @@ -1,287 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "runtime" - "unicode/utf16" - - "github.com/Microsoft/go-winio/internal/fs" - "golang.org/x/sys/windows" -) - -//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite - -const ( - BackupData = uint32(iota + 1) - BackupEaData - BackupSecurity - BackupAlternateData - BackupLink - BackupPropertyData - BackupObjectId //revive:disable-line:var-naming ID, not Id - BackupReparseData - BackupSparseBlock - BackupTxfsData -) - -const ( - StreamSparseAttributes = uint32(8) -) - -//nolint:revive // var-naming: ALL_CAPS -const ( - WRITE_DAC = windows.WRITE_DAC - WRITE_OWNER = windows.WRITE_OWNER - ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY -) - -// BackupHeader represents a backup stream of a file. -type BackupHeader struct { - //revive:disable-next-line:var-naming ID, not Id - Id uint32 // The backup stream ID - Attributes uint32 // Stream attributes - Size int64 // The size of the stream in bytes - Name string // The name of the stream (for BackupAlternateData only). - Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). -} - -type win32StreamID struct { - StreamID uint32 - Attributes uint32 - Size uint64 - NameSize uint32 -} - -// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series -// of BackupHeader values. -type BackupStreamReader struct { - r io.Reader - bytesLeft int64 -} - -// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. 
-func NewBackupStreamReader(r io.Reader) *BackupStreamReader { - return &BackupStreamReader{r, 0} -} - -// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if -// it was not completely read. -func (r *BackupStreamReader) Next() (*BackupHeader, error) { - if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this - if s, ok := r.r.(io.Seeker); ok { - // Make sure Seek on io.SeekCurrent sometimes succeeds - // before trying the actual seek. - if _, err := s.Seek(0, io.SeekCurrent); err == nil { - if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { - return nil, err - } - r.bytesLeft = 0 - } - } - if _, err := io.Copy(io.Discard, r); err != nil { - return nil, err - } - } - var wsi win32StreamID - if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { - return nil, err - } - hdr := &BackupHeader{ - Id: wsi.StreamID, - Attributes: wsi.Attributes, - Size: int64(wsi.Size), - } - if wsi.NameSize != 0 { - name := make([]uint16, int(wsi.NameSize/2)) - if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { - return nil, err - } - hdr.Name = windows.UTF16ToString(name) - } - if wsi.StreamID == BackupSparseBlock { - if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { - return nil, err - } - hdr.Size -= 8 - } - r.bytesLeft = hdr.Size - return hdr, nil -} - -// Read reads from the current backup stream. -func (r *BackupStreamReader) Read(b []byte) (int, error) { - if r.bytesLeft == 0 { - return 0, io.EOF - } - if int64(len(b)) > r.bytesLeft { - b = b[:r.bytesLeft] - } - n, err := r.r.Read(b) - r.bytesLeft -= int64(n) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } else if r.bytesLeft == 0 && err == nil { - err = io.EOF - } - return n, err -} - -// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. -type BackupStreamWriter struct { - w io.Writer - bytesLeft int64 -} - -// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. -func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { - return &BackupStreamWriter{w, 0} -} - -// WriteHeader writes the next backup stream header and prepares for calls to Write(). -func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { - if w.bytesLeft != 0 { - return fmt.Errorf("missing %d bytes", w.bytesLeft) - } - name := utf16.Encode([]rune(hdr.Name)) - wsi := win32StreamID{ - StreamID: hdr.Id, - Attributes: hdr.Attributes, - Size: uint64(hdr.Size), - NameSize: uint32(len(name) * 2), - } - if hdr.Id == BackupSparseBlock { - // Include space for the int64 block offset - wsi.Size += 8 - } - if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { - return err - } - if len(name) != 0 { - if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { - return err - } - } - if hdr.Id == BackupSparseBlock { - if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { - return err - } - } - w.bytesLeft = hdr.Size - return nil -} - -// Write writes to the current backup stream. -func (w *BackupStreamWriter) Write(b []byte) (int, error) { - if w.bytesLeft < int64(len(b)) { - return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) - } - n, err := w.w.Write(b) - w.bytesLeft -= int64(n) - return n, err -} - -// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. 
-type BackupFileReader struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, -// Read will attempt to read the security descriptor of the file. -func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { - r := &BackupFileReader{f, includeSecurity, 0} - return r -} - -// Read reads a backup stream from the file by calling the Win32 API BackupRead(). -func (r *BackupFileReader) Read(b []byte) (int, error) { - var bytesRead uint32 - err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) - if err != nil { - return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} - } - runtime.KeepAlive(r.f) - if bytesRead == 0 { - return 0, io.EOF - } - return int(bytesRead), nil -} - -// Close frees Win32 resources associated with the BackupFileReader. It does not close -// the underlying file. -func (r *BackupFileReader) Close() error { - if r.ctx != 0 { - _ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) - runtime.KeepAlive(r.f) - r.ctx = 0 - } - return nil -} - -// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. -type BackupFileWriter struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, -// Write() will attempt to restore the security descriptor from the stream. -func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { - w := &BackupFileWriter{f, includeSecurity, 0} - return w -} - -// Write restores a portion of the file using the provided backup stream. -func (w *BackupFileWriter) Write(b []byte) (int, error) { - var bytesWritten uint32 - err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) - if err != nil { - return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} - } - runtime.KeepAlive(w.f) - if int(bytesWritten) != len(b) { - return int(bytesWritten), errors.New("not all bytes could be written") - } - return len(b), nil -} - -// Close frees Win32 resources associated with the BackupFileWriter. It does not -// close the underlying file. -func (w *BackupFileWriter) Close() error { - if w.ctx != 0 { - _ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) - runtime.KeepAlive(w.f) - w.ctx = 0 - } - return nil -} - -// OpenForBackup opens a file or directory, potentially skipping access checks if the backup -// or restore privileges have been acquired. -// -// If the file opened was a directory, it cannot be used with Readdir(). -func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { - h, err := fs.CreateFile(path, - fs.AccessMask(access), - fs.FileShareMode(share), - nil, - fs.FileCreationDisposition(createmode), - fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT, - 0, - ) - if err != nil { - err = &os.PathError{Op: "open", Path: path, Err: err} - return nil, err - } - return os.NewFile(uintptr(h), path), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/doc.go b/vendor/github.com/Microsoft/go-winio/doc.go deleted file mode 100644 index 1f5bfe2d5..000000000 --- a/vendor/github.com/Microsoft/go-winio/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// This package provides utilities for efficiently performing Win32 IO operations in Go. 
-// Currently, this package is provides support for genreal IO and management of -// - named pipes -// - files -// - [Hyper-V sockets] -// -// This code is similar to Go's [net] package, and uses IO completion ports to avoid -// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines. -// -// This limits support to Windows Vista and newer operating systems. -// -// Additionally, this package provides support for: -// - creating and managing GUIDs -// - writing to [ETW] -// - opening and manageing VHDs -// - parsing [Windows Image files] -// - auto-generating Win32 API code -// -// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service -// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw- -// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images -package winio diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go deleted file mode 100644 index e104dbdfd..000000000 --- a/vendor/github.com/Microsoft/go-winio/ea.go +++ /dev/null @@ -1,137 +0,0 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "errors" -) - -type fileFullEaInformation struct { - NextEntryOffset uint32 - Flags uint8 - NameLength uint8 - ValueLength uint16 -} - -var ( - fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) - - errInvalidEaBuffer = errors.New("invalid extended attribute buffer") - errEaNameTooLarge = errors.New("extended attribute name too large") - errEaValueTooLarge = errors.New("extended attribute value too large") -) - -// ExtendedAttribute represents a single Windows EA. -type ExtendedAttribute struct { - Name string - Value []byte - Flags uint8 -} - -func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { - var info fileFullEaInformation - err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) - if err != nil { - err = errInvalidEaBuffer - return ea, nb, err - } - - nameOffset := fileFullEaInformationSize - nameLen := int(info.NameLength) - valueOffset := nameOffset + int(info.NameLength) + 1 - valueLen := int(info.ValueLength) - nextOffset := int(info.NextEntryOffset) - if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { - err = errInvalidEaBuffer - return ea, nb, err - } - - ea.Name = string(b[nameOffset : nameOffset+nameLen]) - ea.Value = b[valueOffset : valueOffset+valueLen] - ea.Flags = info.Flags - if info.NextEntryOffset != 0 { - nb = b[info.NextEntryOffset:] - } - return ea, nb, err -} - -// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION -// buffer retrieved from BackupRead, ZwQueryEaFile, etc. 
-func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { - for len(b) != 0 { - ea, nb, err := parseEa(b) - if err != nil { - return nil, err - } - - eas = append(eas, ea) - b = nb - } - return eas, err -} - -func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { - if int(uint8(len(ea.Name))) != len(ea.Name) { - return errEaNameTooLarge - } - if int(uint16(len(ea.Value))) != len(ea.Value) { - return errEaValueTooLarge - } - entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) - withPadding := (entrySize + 3) &^ 3 - nextOffset := uint32(0) - if !last { - nextOffset = withPadding - } - info := fileFullEaInformation{ - NextEntryOffset: nextOffset, - Flags: ea.Flags, - NameLength: uint8(len(ea.Name)), - ValueLength: uint16(len(ea.Value)), - } - - err := binary.Write(buf, binary.LittleEndian, &info) - if err != nil { - return err - } - - _, err = buf.Write([]byte(ea.Name)) - if err != nil { - return err - } - - err = buf.WriteByte(0) - if err != nil { - return err - } - - _, err = buf.Write(ea.Value) - if err != nil { - return err - } - - _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) - if err != nil { - return err - } - - return nil -} - -// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION -// buffer for use with BackupWrite, ZwSetEaFile, etc. -func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { - var buf bytes.Buffer - for i := range eas { - last := false - if i == len(eas)-1 { - last = true - } - - err := writeEa(&buf, &eas[i], last) - if err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go deleted file mode 100644 index fe82a180d..000000000 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ /dev/null @@ -1,320 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "errors" - "io" - "runtime" - "sync" - "sync/atomic" - "syscall" - "time" - - "golang.org/x/sys/windows" -) - -//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -var ( - ErrFileClosed = errors.New("file has already been closed") - ErrTimeout = &timeoutError{} -) - -type timeoutError struct{} - -func (*timeoutError) Error() string { return "i/o timeout" } -func (*timeoutError) Timeout() bool { return true } -func (*timeoutError) Temporary() bool { return true } - -type timeoutChan chan struct{} - -var ioInitOnce sync.Once -var ioCompletionPort windows.Handle - -// ioResult contains the result of an asynchronous IO operation. -type ioResult struct { - bytes uint32 - err error -} - -// ioOperation represents an outstanding asynchronous Win32 IO. 
-type ioOperation struct { - o windows.Overlapped - ch chan ioResult -} - -func initIO() { - h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) - if err != nil { - panic(err) - } - ioCompletionPort = h - go ioCompletionProcessor(h) -} - -// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. -// It takes ownership of this handle and will close it if it is garbage collected. -type win32File struct { - handle windows.Handle - wg sync.WaitGroup - wgLock sync.RWMutex - closing atomic.Bool - socket bool - readDeadline deadlineHandler - writeDeadline deadlineHandler -} - -type deadlineHandler struct { - setLock sync.Mutex - channel timeoutChan - channelLock sync.RWMutex - timer *time.Timer - timedout atomic.Bool -} - -// makeWin32File makes a new win32File from an existing file handle. -func makeWin32File(h windows.Handle) (*win32File, error) { - f := &win32File{handle: h} - ioInitOnce.Do(initIO) - _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) - if err != nil { - return nil, err - } - err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE) - if err != nil { - return nil, err - } - f.readDeadline.channel = make(timeoutChan) - f.writeDeadline.channel = make(timeoutChan) - return f, nil -} - -// Deprecated: use NewOpenFile instead. -func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - return NewOpenFile(windows.Handle(h)) -} - -func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) { - // If we return the result of makeWin32File directly, it can result in an - // interface-wrapped nil, rather than a nil interface value. - f, err := makeWin32File(h) - if err != nil { - return nil, err - } - return f, nil -} - -// closeHandle closes the resources associated with a Win32 handle. -func (f *win32File) closeHandle() { - f.wgLock.Lock() - // Atomically set that we are closing, releasing the resources only once. - if !f.closing.Swap(true) { - f.wgLock.Unlock() - // cancel all IO and wait for it to complete - _ = cancelIoEx(f.handle, nil) - f.wg.Wait() - // at this point, no new IO can start - windows.Close(f.handle) - f.handle = 0 - } else { - f.wgLock.Unlock() - } -} - -// Close closes a win32File. -func (f *win32File) Close() error { - f.closeHandle() - return nil -} - -// IsClosed checks if the file has been closed. -func (f *win32File) IsClosed() bool { - return f.closing.Load() -} - -// prepareIO prepares for a new IO operation. -// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. -func (f *win32File) prepareIO() (*ioOperation, error) { - f.wgLock.RLock() - if f.closing.Load() { - f.wgLock.RUnlock() - return nil, ErrFileClosed - } - f.wg.Add(1) - f.wgLock.RUnlock() - c := &ioOperation{} - c.ch = make(chan ioResult) - return c, nil -} - -// ioCompletionProcessor processes completed async IOs forever. -func ioCompletionProcessor(h windows.Handle) { - for { - var bytes uint32 - var key uintptr - var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE) - if op == nil { - panic(err) - } - op.ch <- ioResult{bytes, err} - } -} - -// todo: helsaawy - create an asyncIO version that takes a context - -// asyncIO processes the return value from ReadFile or WriteFile, blocking until -// the operation has actually completed. 
-func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno - return int(bytes), err - } - - if f.closing.Load() { - _ = cancelIoEx(f.handle, &c.o) - } - - var timeout timeoutChan - if d != nil { - d.channelLock.Lock() - timeout = d.channel - d.channelLock.Unlock() - } - - var r ioResult - select { - case r = <-c.ch: - err = r.err - if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - if f.closing.Load() { - err = ErrFileClosed - } - } else if err != nil && f.socket { - // err is from Win32. Query the overlapped structure to get the winsock error. - var bytes, flags uint32 - err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) - } - case <-timeout: - _ = cancelIoEx(f.handle, &c.o) - r = <-c.ch - err = r.err - if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - err = ErrTimeout - } - } - - // runtime.KeepAlive is needed, as c is passed via native - // code to ioCompletionProcessor, c must remain alive - // until the channel read is complete. - // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive? - runtime.KeepAlive(c) - return int(r.bytes), err -} - -// Read reads from a file handle. -func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIO() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.readDeadline.timedout.Load() { - return 0, ErrTimeout - } - - var bytes uint32 - err = windows.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIO(c, &f.readDeadline, bytes, err) - runtime.KeepAlive(b) - - // Handle EOF conditions. - if err == nil && n == 0 && len(b) != 0 { - return 0, io.EOF - } else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno - return 0, io.EOF - } - return n, err -} - -// Write writes to a file handle. -func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIO() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.writeDeadline.timedout.Load() { - return 0, ErrTimeout - } - - var bytes uint32 - err = windows.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) - runtime.KeepAlive(b) - return n, err -} - -func (f *win32File) SetReadDeadline(deadline time.Time) error { - return f.readDeadline.set(deadline) -} - -func (f *win32File) SetWriteDeadline(deadline time.Time) error { - return f.writeDeadline.set(deadline) -} - -func (f *win32File) Flush() error { - return windows.FlushFileBuffers(f.handle) -} - -func (f *win32File) Fd() uintptr { - return uintptr(f.handle) -} - -func (d *deadlineHandler) set(deadline time.Time) error { - d.setLock.Lock() - defer d.setLock.Unlock() - - if d.timer != nil { - if !d.timer.Stop() { - <-d.channel - } - d.timer = nil - } - d.timedout.Store(false) - - select { - case <-d.channel: - d.channelLock.Lock() - d.channel = make(chan struct{}) - d.channelLock.Unlock() - default: - } - - if deadline.IsZero() { - return nil - } - - timeoutIO := func() { - d.timedout.Store(true) - close(d.channel) - } - - now := time.Now() - duration := deadline.Sub(now) - if deadline.After(now) { - // Deadline is in the future, set a timer to wait - d.timer = time.AfterFunc(duration, timeoutIO) - } else { - // Deadline is in the past. Cancel all pending IO now. 
- timeoutIO() - } - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go deleted file mode 100644 index c860eb991..000000000 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ /dev/null @@ -1,106 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "os" - "runtime" - "unsafe" - - "golang.org/x/sys/windows" -) - -// FileBasicInfo contains file access time and file attributes information. -type FileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime - FileAttributes uint32 - _ uint32 // padding -} - -// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing -// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64 -// alignment is necessary to pass this as FILE_BASIC_INFO. -type alignedFileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64 - FileAttributes uint32 - _ uint32 // padding -} - -// GetFileBasicInfo retrieves times and attributes for a file. -func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { - bi := &alignedFileBasicInfo{} - if err := windows.GetFileInformationByHandleEx( - windows.Handle(f.Fd()), - windows.FileBasicInfo, - (*byte)(unsafe.Pointer(bi)), - uint32(unsafe.Sizeof(*bi)), - ); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - // Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the - // public API of this module. The data may be unnecessarily aligned. - return (*FileBasicInfo)(unsafe.Pointer(bi)), nil -} - -// SetFileBasicInfo sets times and attributes for a file. -func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - // Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is - // suitable to pass to GetFileInformationByHandleEx. - biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi)) - if err := windows.SetFileInformationByHandle( - windows.Handle(f.Fd()), - windows.FileBasicInfo, - (*byte)(unsafe.Pointer(&biAligned)), - uint32(unsafe.Sizeof(biAligned)), - ); err != nil { - return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return nil -} - -// FileStandardInfo contains extended information for the file. -// FILE_STANDARD_INFO in WinBase.h -// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info -type FileStandardInfo struct { - AllocationSize, EndOfFile int64 - NumberOfLinks uint32 - DeletePending, Directory bool -} - -// GetFileStandardInfo retrieves ended information for the file. -func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { - si := &FileStandardInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), - windows.FileStandardInfo, - (*byte)(unsafe.Pointer(si)), - uint32(unsafe.Sizeof(*si))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return si, nil -} - -// FileIDInfo contains the volume serial number and file ID for a file. This pair should be -// unique on a system. -type FileIDInfo struct { - VolumeSerialNumber uint64 - FileID [16]byte -} - -// GetFileID retrieves the unique (volume, file ID) pair for a file. 
-func GetFileID(f *os.File) (*FileIDInfo, error) { - fileID := &FileIDInfo{} - if err := windows.GetFileInformationByHandleEx( - windows.Handle(f.Fd()), - windows.FileIdInfo, - (*byte)(unsafe.Pointer(fileID)), - uint32(unsafe.Sizeof(*fileID)), - ); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return fileID, nil -} diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go deleted file mode 100644 index c4fdd9d4a..000000000 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ /dev/null @@ -1,582 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "os" - "time" - "unsafe" - - "golang.org/x/sys/windows" - - "github.com/Microsoft/go-winio/internal/socket" - "github.com/Microsoft/go-winio/pkg/guid" -) - -const afHVSock = 34 // AF_HYPERV - -// Well known Service and VM IDs -// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards - -// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. -func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 - return guid.GUID{} -} - -// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. -func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff - return guid.GUID{ - Data1: 0xffffffff, - Data2: 0xffff, - Data3: 0xffff, - Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - } -} - -// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector. -func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838 - return guid.GUID{ - Data1: 0xe0e16197, - Data2: 0xdd56, - Data3: 0x4a10, - Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38}, - } -} - -// HvsockGUIDSiloHost is the address of a silo's host partition: -// - The silo host of a hosted silo is the utility VM. -// - The silo host of a silo on a physical host is the physical host. -func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568 - return guid.GUID{ - Data1: 0x36bd0c5c, - Data2: 0x7276, - Data3: 0x4223, - Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68}, - } -} - -// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions. -func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd - return guid.GUID{ - Data1: 0x90db8b89, - Data2: 0xd35, - Data3: 0x4f79, - Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd}, - } -} - -// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition. -// Listening on this VmId accepts connection from: -// - Inside silos: silo host partition. -// - Inside hosted silo: host of the VM. -// - Inside VM: VM host. -// - Physical host: Not supported. -func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878 - return guid.GUID{ - Data1: 0xa42e7cda, - Data2: 0xd03f, - Data3: 0x480c, - Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78}, - } -} - -// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol. 
-func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3 - return guid.GUID{ - Data2: 0xfacb, - Data3: 0x11e6, - Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3}, - } -} - -// An HvsockAddr is an address for a AF_HYPERV socket. -type HvsockAddr struct { - VMID guid.GUID - ServiceID guid.GUID -} - -type rawHvsockAddr struct { - Family uint16 - _ uint16 - VMID guid.GUID - ServiceID guid.GUID -} - -var _ socket.RawSockaddr = &rawHvsockAddr{} - -// Network returns the address's network name, "hvsock". -func (*HvsockAddr) Network() string { - return "hvsock" -} - -func (addr *HvsockAddr) String() string { - return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) -} - -// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. -func VsockServiceID(port uint32) guid.GUID { - g := hvsockVsockServiceTemplate() // make a copy - g.Data1 = port - return g -} - -func (addr *HvsockAddr) raw() rawHvsockAddr { - return rawHvsockAddr{ - Family: afHVSock, - VMID: addr.VMID, - ServiceID: addr.ServiceID, - } -} - -func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { - addr.VMID = raw.VMID - addr.ServiceID = raw.ServiceID -} - -// Sockaddr returns a pointer to and the size of this struct. -// -// Implements the [socket.RawSockaddr] interface, and allows use in -// [socket.Bind] and [socket.ConnectEx]. -func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) { - return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil -} - -// Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`. -func (r *rawHvsockAddr) FromBytes(b []byte) error { - n := int(unsafe.Sizeof(rawHvsockAddr{})) - - if len(b) < n { - return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize) - } - - copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n]) - if r.Family != afHVSock { - return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily) - } - - return nil -} - -// HvsockListener is a socket listener for the AF_HYPERV address family. -type HvsockListener struct { - sock *win32File - addr HvsockAddr -} - -var _ net.Listener = &HvsockListener{} - -// HvsockConn is a connected socket of the AF_HYPERV address family. -type HvsockConn struct { - sock *win32File - local, remote HvsockAddr -} - -var _ net.Conn = &HvsockConn{} - -func newHVSocket() (*win32File, error) { - fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1) - if err != nil { - return nil, os.NewSyscallError("socket", err) - } - f, err := makeWin32File(fd) - if err != nil { - windows.Close(fd) - return nil, err - } - f.socket = true - return f, nil -} - -// ListenHvsock listens for connections on the specified hvsock address. -func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { - l := &HvsockListener{addr: *addr} - - var sock *win32File - sock, err = newHVSocket() - if err != nil { - return nil, l.opErr("listen", err) - } - defer func() { - if err != nil { - _ = sock.Close() - } - }() - - sa := addr.raw() - err = socket.Bind(sock.handle, &sa) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("socket", err)) - } - err = windows.Listen(sock.handle, 16) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("listen", err)) - } - return &HvsockListener{sock: sock, addr: *addr}, nil -} - -func (l *HvsockListener) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} -} - -// Addr returns the listener's network address. 
-func (l *HvsockListener) Addr() net.Addr { - return &l.addr -} - -// Accept waits for the next connection and returns it. -func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHVSocket() - if err != nil { - return nil, l.opErr("accept", err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := l.sock.prepareIO() - if err != nil { - return nil, l.opErr("accept", err) - } - defer l.sock.wg.Done() - - // AcceptEx, per documentation, requires an extra 16 bytes per address. - // - // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex - const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) - var addrbuf [addrlen * 2]byte - - var bytes uint32 - err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) - if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { - return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) - } - - conn := &HvsockConn{ - sock: sock, - } - // The local address returned in the AcceptEx buffer is the same as the Listener socket's - // address. However, the service GUID reported by GetSockName is different from the Listeners - // socket, and is sometimes the same as the local address of the socket that dialed the - // address, with the service GUID.Data1 incremented, but othertimes is different. - // todo: does the local address matter? is the listener's address or the actual address appropriate? - conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) - conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) - - // initialize the accepted socket and update its properties with those of the listening socket - if err = windows.Setsockopt(sock.handle, - windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, - (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { - return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) - } - - sock = nil - return conn, nil -} - -// Close closes the listener, causing any pending Accept calls to fail. -func (l *HvsockListener) Close() error { - return l.sock.Close() -} - -// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]). -type HvsockDialer struct { - // Deadline is the time the Dial operation must connect before erroring. - Deadline time.Time - - // Retries is the number of additional connects to try if the connection times out, is refused, - // or the host is unreachable - Retries uint - - // RetryWait is the time to wait after a connection error to retry - RetryWait time.Duration - - rt *time.Timer // redial wait timer -} - -// Dial the Hyper-V socket at addr. -// -// See [HvsockDialer.Dial] for more information. -func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { - return (&HvsockDialer{}).Dial(ctx, addr) -} - -// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful. -// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between -// retries. -// -// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx. 
-func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { - op := "dial" - // create the conn early to use opErr() - conn = &HvsockConn{ - remote: *addr, - } - - if !d.Deadline.IsZero() { - var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, d.Deadline) - defer cancel() - } - - // preemptive timeout/cancellation check - if err = ctx.Err(); err != nil { - return nil, conn.opErr(op, err) - } - - sock, err := newHVSocket() - if err != nil { - return nil, conn.opErr(op, err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - - sa := addr.raw() - err = socket.Bind(sock.handle, &sa) - if err != nil { - return nil, conn.opErr(op, os.NewSyscallError("bind", err)) - } - - c, err := sock.prepareIO() - if err != nil { - return nil, conn.opErr(op, err) - } - defer sock.wg.Done() - var bytes uint32 - for i := uint(0); i <= d.Retries; i++ { - err = socket.ConnectEx( - sock.handle, - &sa, - nil, // sendBuf - 0, // sendDataLen - &bytes, - (*windows.Overlapped)(unsafe.Pointer(&c.o))) - _, err = sock.asyncIO(c, nil, bytes, err) - if i < d.Retries && canRedial(err) { - if err = d.redialWait(ctx); err == nil { - continue - } - } - break - } - if err != nil { - return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) - } - - // update the connection properties, so shutdown can be used - if err = windows.Setsockopt( - sock.handle, - windows.SOL_SOCKET, - windows.SO_UPDATE_CONNECT_CONTEXT, - nil, // optvalue - 0, // optlen - ); err != nil { - return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) - } - - // get the local name - var sal rawHvsockAddr - err = socket.GetSockName(sock.handle, &sal) - if err != nil { - return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) - } - conn.local.fromRaw(&sal) - - // one last check for timeout, since asyncIO doesn't check the context - if err = ctx.Err(); err != nil { - return nil, conn.opErr(op, err) - } - - conn.sock = sock - sock = nil - - return conn, nil -} - -// redialWait waits before attempting to redial, resetting the timer as appropriate. -func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { - if d.RetryWait == 0 { - return nil - } - - if d.rt == nil { - d.rt = time.NewTimer(d.RetryWait) - } else { - // should already be stopped and drained - d.rt.Reset(d.RetryWait) - } - - select { - case <-ctx.Done(): - case <-d.rt.C: - return nil - } - - // stop and drain the timer - if !d.rt.Stop() { - <-d.rt.C - } - return ctx.Err() -} - -// assumes error is a plain, unwrapped windows.Errno provided by direct syscall. 
-func canRedial(err error) bool { - //nolint:errorlint // guaranteed to be an Errno - switch err { - case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, - windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: - return true - default: - return false - } -} - -func (conn *HvsockConn) opErr(op string, err error) error { - // translate from "file closed" to "socket closed" - if errors.Is(err, ErrFileClosed) { - err = socket.ErrSocketClosed - } - return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} -} - -func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIO() - if err != nil { - return 0, conn.opErr("read", err) - } - defer conn.sock.wg.Done() - buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var flags, bytes uint32 - err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) - if err != nil { - var eno windows.Errno - if errors.As(err, &eno) { - err = os.NewSyscallError("wsarecv", eno) - } - return 0, conn.opErr("read", err) - } else if n == 0 { - err = io.EOF - } - return n, err -} - -func (conn *HvsockConn) Write(b []byte) (int, error) { - t := 0 - for len(b) != 0 { - n, err := conn.write(b) - if err != nil { - return t + n, err - } - t += n - b = b[n:] - } - return t, nil -} - -func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIO() - if err != nil { - return 0, conn.opErr("write", err) - } - defer conn.sock.wg.Done() - buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var bytes uint32 - err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) - if err != nil { - var eno windows.Errno - if errors.As(err, &eno) { - err = os.NewSyscallError("wsasend", eno) - } - return 0, conn.opErr("write", err) - } - return n, err -} - -// Close closes the socket connection, failing any pending read or write calls. -func (conn *HvsockConn) Close() error { - return conn.sock.Close() -} - -func (conn *HvsockConn) IsClosed() bool { - return conn.sock.IsClosed() -} - -// shutdown disables sending or receiving on a socket. -func (conn *HvsockConn) shutdown(how int) error { - if conn.IsClosed() { - return socket.ErrSocketClosed - } - - err := windows.Shutdown(conn.sock.handle, how) - if err != nil { - // If the connection was closed, shutdowns fail with "not connected" - if errors.Is(err, windows.WSAENOTCONN) || - errors.Is(err, windows.WSAESHUTDOWN) { - err = socket.ErrSocketClosed - } - return os.NewSyscallError("shutdown", err) - } - return nil -} - -// CloseRead shuts down the read end of the socket, preventing future read operations. -func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(windows.SHUT_RD) - if err != nil { - return conn.opErr("closeread", err) - } - return nil -} - -// CloseWrite shuts down the write end of the socket, preventing future write operations and -// notifying the other endpoint that no more data will be written. -func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(windows.SHUT_WR) - if err != nil { - return conn.opErr("closewrite", err) - } - return nil -} - -// LocalAddr returns the local address of the connection. -func (conn *HvsockConn) LocalAddr() net.Addr { - return &conn.local -} - -// RemoteAddr returns the remote address of the connection. 
-func (conn *HvsockConn) RemoteAddr() net.Addr { - return &conn.remote -} - -// SetDeadline implements the net.Conn SetDeadline method. -func (conn *HvsockConn) SetDeadline(t time.Time) error { - // todo: implement `SetDeadline` for `win32File` - if err := conn.SetReadDeadline(t); err != nil { - return fmt.Errorf("set read deadline: %w", err) - } - if err := conn.SetWriteDeadline(t); err != nil { - return fmt.Errorf("set write deadline: %w", err) - } - return nil -} - -// SetReadDeadline implements the net.Conn SetReadDeadline method. -func (conn *HvsockConn) SetReadDeadline(t time.Time) error { - return conn.sock.SetReadDeadline(t) -} - -// SetWriteDeadline implements the net.Conn SetWriteDeadline method. -func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { - return conn.sock.SetWriteDeadline(t) -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go deleted file mode 100644 index 1f6538817..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// This package contains Win32 filesystem functionality. -package fs diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go deleted file mode 100644 index 0cd9621df..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go +++ /dev/null @@ -1,262 +0,0 @@ -//go:build windows - -package fs - -import ( - "golang.org/x/sys/windows" - - "github.com/Microsoft/go-winio/internal/stringbuffer" -) - -//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go - -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew -//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW - -const NullHandle windows.Handle = 0 - -// AccessMask defines standard, specific, and generic rights. -// -// Used with CreateFile and NtCreateFile (and co.). -// -// Bitmask: -// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 -// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 -// +---------------+---------------+-------------------------------+ -// |G|G|G|G|Resvd|A| StandardRights| SpecificRights | -// |R|W|E|A| |S| | | -// +-+-------------+---------------+-------------------------------+ -// -// GR Generic Read -// GW Generic Write -// GE Generic Exectue -// GA Generic All -// Resvd Reserved -// AS Access Security System -// -// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask -// -// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights -// -// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants -type AccessMask = windows.ACCESS_MASK - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // Not actually any. 
- // - // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device" - // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters - FILE_ANY_ACCESS AccessMask = 0 - - GENERIC_READ AccessMask = 0x8000_0000 - GENERIC_WRITE AccessMask = 0x4000_0000 - GENERIC_EXECUTE AccessMask = 0x2000_0000 - GENERIC_ALL AccessMask = 0x1000_0000 - ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000 - - // Specific Object Access - // from ntioapi.h - - FILE_READ_DATA AccessMask = (0x0001) // file & pipe - FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory - - FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe - FILE_ADD_FILE AccessMask = (0x0002) // directory - - FILE_APPEND_DATA AccessMask = (0x0004) // file - FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory - FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe - - FILE_READ_EA AccessMask = (0x0008) // file & directory - FILE_READ_PROPERTIES AccessMask = FILE_READ_EA - - FILE_WRITE_EA AccessMask = (0x0010) // file & directory - FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA - - FILE_EXECUTE AccessMask = (0x0020) // file - FILE_TRAVERSE AccessMask = (0x0020) // directory - - FILE_DELETE_CHILD AccessMask = (0x0040) // directory - - FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all - - FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all - - FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF) - FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE) - FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE) - FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE) - - SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF - - // Standard Access - // from ntseapi.h - - DELETE AccessMask = 0x0001_0000 - READ_CONTROL AccessMask = 0x0002_0000 - WRITE_DAC AccessMask = 0x0004_0000 - WRITE_OWNER AccessMask = 0x0008_0000 - SYNCHRONIZE AccessMask = 0x0010_0000 - - STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000 - - STANDARD_RIGHTS_READ AccessMask = READ_CONTROL - STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL - STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL - - STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000 -) - -type FileShareMode uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - FILE_SHARE_NONE FileShareMode = 0x00 - FILE_SHARE_READ FileShareMode = 0x01 - FILE_SHARE_WRITE FileShareMode = 0x02 - FILE_SHARE_DELETE FileShareMode = 0x04 - FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 -) - -type FileCreationDisposition uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // from winbase.h - - CREATE_NEW FileCreationDisposition = 0x01 - CREATE_ALWAYS FileCreationDisposition = 0x02 - OPEN_EXISTING FileCreationDisposition = 0x03 - OPEN_ALWAYS FileCreationDisposition = 0x04 - TRUNCATE_EXISTING FileCreationDisposition = 0x05 -) - -// Create disposition values for NtCreate* -type NTFileCreationDisposition uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
-const ( - // From ntioapi.h - - FILE_SUPERSEDE NTFileCreationDisposition = 0x00 - FILE_OPEN NTFileCreationDisposition = 0x01 - FILE_CREATE NTFileCreationDisposition = 0x02 - FILE_OPEN_IF NTFileCreationDisposition = 0x03 - FILE_OVERWRITE NTFileCreationDisposition = 0x04 - FILE_OVERWRITE_IF NTFileCreationDisposition = 0x05 - FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05 -) - -// CreateFile and co. take flags or attributes together as one parameter. -// Define alias until we can use generics to allow both -// -// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants -type FileFlagOrAttribute uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // from winnt.h - - FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 - FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 - FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 - FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000 - FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000 - FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000 - FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000 - FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000 - FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000 - FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000 - FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 -) - -// NtCreate* functions take a dedicated CreateOptions parameter. -// -// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile -// -// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file -type NTCreateOptions uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // From ntioapi.h - - FILE_DIRECTORY_FILE NTCreateOptions = 0x0000_0001 - FILE_WRITE_THROUGH NTCreateOptions = 0x0000_0002 - FILE_SEQUENTIAL_ONLY NTCreateOptions = 0x0000_0004 - FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008 - - FILE_SYNCHRONOUS_IO_ALERT NTCreateOptions = 0x0000_0010 - FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020 - FILE_NON_DIRECTORY_FILE NTCreateOptions = 0x0000_0040 - FILE_CREATE_TREE_CONNECTION NTCreateOptions = 0x0000_0080 - - FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100 - FILE_NO_EA_KNOWLEDGE NTCreateOptions = 0x0000_0200 - FILE_DISABLE_TUNNELING NTCreateOptions = 0x0000_0400 - FILE_RANDOM_ACCESS NTCreateOptions = 0x0000_0800 - - FILE_DELETE_ON_CLOSE NTCreateOptions = 0x0000_1000 - FILE_OPEN_BY_FILE_ID NTCreateOptions = 0x0000_2000 - FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000 - FILE_NO_COMPRESSION NTCreateOptions = 0x0000_8000 -) - -type FileSQSFlag = FileFlagOrAttribute - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
-const ( - // from winbase.h - - SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) - SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) - SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) - SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) - - SECURITY_SQOS_PRESENT FileSQSFlag = 0x0010_0000 - SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000 -) - -// GetFinalPathNameByHandle flags -// -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters -type GetFinalPathFlag uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - GetFinalPathDefaultFlag GetFinalPathFlag = 0x0 - - FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0 - FILE_NAME_OPENED GetFinalPathFlag = 0x8 - - VOLUME_NAME_DOS GetFinalPathFlag = 0x0 - VOLUME_NAME_GUID GetFinalPathFlag = 0x1 - VOLUME_NAME_NT GetFinalPathFlag = 0x2 - VOLUME_NAME_NONE GetFinalPathFlag = 0x4 -) - -// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle -// with the given handle and flags. It transparently takes care of creating a buffer of the -// correct size for the call. -// -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew -func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) { - b := stringbuffer.NewWString() - //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n? - for { - n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags)) - if err != nil { - return "", err - } - // If the buffer wasn't large enough, n will be the total size needed (including null terminator). - // Resize and try again. - if n > b.Cap() { - b.ResizeTo(n) - continue - } - // If the buffer is large enough, n will be the size not including the null terminator. - // Convert to a Go string and return. - return b.String(), nil - } -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go deleted file mode 100644 index 81760ac67..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go +++ /dev/null @@ -1,12 +0,0 @@ -package fs - -// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level -type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32` - -// Impersonation levels -const ( - SecurityAnonymous SecurityImpersonationLevel = 0 - SecurityIdentification SecurityImpersonationLevel = 1 - SecurityImpersonation SecurityImpersonationLevel = 2 - SecurityDelegation SecurityImpersonationLevel = 3 -) diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go deleted file mode 100644 index a94e234c7..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go +++ /dev/null @@ -1,61 +0,0 @@ -//go:build windows - -// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. - -package fs - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. 
-const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - return e -} - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procCreateFileW = modkernel32.NewProc("CreateFileW") -) - -func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) -} - -func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { - r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) - handle = windows.Handle(r0) - if handle == windows.InvalidHandle { - err = errnoErr(e1) - } - return -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go deleted file mode 100644 index 7e82f9afa..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go +++ /dev/null @@ -1,20 +0,0 @@ -package socket - -import ( - "unsafe" -) - -// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The -// struct must meet the Win32 sockaddr requirements specified here: -// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2 -// -// Specifically, the struct size must be least larger than an int16 (unsigned short) -// for the address family. -type RawSockaddr interface { - // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing - // for the RawSockaddr's data to be overwritten by syscalls (if necessary). - // - // It is the callers responsibility to validate that the values are valid; invalid - // pointers or size can cause a panic. 
- Sockaddr() (unsafe.Pointer, int32, error) -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go deleted file mode 100644 index 88580d974..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go +++ /dev/null @@ -1,177 +0,0 @@ -//go:build windows - -package socket - -import ( - "errors" - "fmt" - "net" - "sync" - "syscall" - "unsafe" - - "github.com/Microsoft/go-winio/pkg/guid" - "golang.org/x/sys/windows" -) - -//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go - -//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname -//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername -//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind - -const socketError = uintptr(^uint32(0)) - -var ( - // todo(helsaawy): create custom error types to store the desired vs actual size and addr family? - - ErrBufferSize = errors.New("buffer size") - ErrAddrFamily = errors.New("address family") - ErrInvalidPointer = errors.New("invalid pointer") - ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed) -) - -// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error) - -// GetSockName writes the local address of socket s to the [RawSockaddr] rsa. -// If rsa is not large enough, the [windows.WSAEFAULT] is returned. -func GetSockName(s windows.Handle, rsa RawSockaddr) error { - ptr, l, err := rsa.Sockaddr() - if err != nil { - return fmt.Errorf("could not retrieve socket pointer and size: %w", err) - } - - // although getsockname returns WSAEFAULT if the buffer is too small, it does not set - // &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy - return getsockname(s, ptr, &l) -} - -// GetPeerName returns the remote address the socket is connected to. -// -// See [GetSockName] for more information. 
-func GetPeerName(s windows.Handle, rsa RawSockaddr) error { - ptr, l, err := rsa.Sockaddr() - if err != nil { - return fmt.Errorf("could not retrieve socket pointer and size: %w", err) - } - - return getpeername(s, ptr, &l) -} - -func Bind(s windows.Handle, rsa RawSockaddr) (err error) { - ptr, l, err := rsa.Sockaddr() - if err != nil { - return fmt.Errorf("could not retrieve socket pointer and size: %w", err) - } - - return bind(s, ptr, l) -} - -// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the -// their sockaddr interface, so they cannot be used with HvsockAddr -// Replicate functionality here from -// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go - -// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at -// runtime via a WSAIoctl call: -// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks - -type runtimeFunc struct { - id guid.GUID - once sync.Once - addr uintptr - err error -} - -func (f *runtimeFunc) Load() error { - f.once.Do(func() { - var s windows.Handle - s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP) - if f.err != nil { - return - } - defer windows.CloseHandle(s) //nolint:errcheck - - var n uint32 - f.err = windows.WSAIoctl(s, - windows.SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&f.id)), - uint32(unsafe.Sizeof(f.id)), - (*byte)(unsafe.Pointer(&f.addr)), - uint32(unsafe.Sizeof(f.addr)), - &n, - nil, // overlapped - 0, // completionRoutine - ) - }) - return f.err -} - -var ( - // todo: add `AcceptEx` and `GetAcceptExSockaddrs` - WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS - Data1: 0x25a207b9, - Data2: 0xddf3, - Data3: 0x4660, - Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, - } - - connectExFunc = runtimeFunc{id: WSAID_CONNECTEX} -) - -func ConnectEx( - fd windows.Handle, - rsa RawSockaddr, - sendBuf *byte, - sendDataLen uint32, - bytesSent *uint32, - overlapped *windows.Overlapped, -) error { - if err := connectExFunc.Load(); err != nil { - return fmt.Errorf("failed to load ConnectEx function pointer: %w", err) - } - ptr, n, err := rsa.Sockaddr() - if err != nil { - return err - } - return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) -} - -// BOOL LpfnConnectex( -// [in] SOCKET s, -// [in] const sockaddr *name, -// [in] int namelen, -// [in, optional] PVOID lpSendBuffer, -// [in] DWORD dwSendDataLength, -// [out] LPDWORD lpdwBytesSent, -// [in] LPOVERLAPPED lpOverlapped -// ) - -func connectEx( - s windows.Handle, - name unsafe.Pointer, - namelen int32, - sendBuf *byte, - sendDataLen uint32, - bytesSent *uint32, - overlapped *windows.Overlapped, -) (err error) { - r1, _, e1 := syscall.SyscallN(connectExFunc.addr, - uintptr(s), - uintptr(name), - uintptr(namelen), - uintptr(unsafe.Pointer(sendBuf)), - uintptr(sendDataLen), - uintptr(unsafe.Pointer(bytesSent)), - uintptr(unsafe.Pointer(overlapped)), - ) - - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return err -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go deleted file mode 100644 index e1504126a..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build windows - -// Code generated by 'go generate' using 
"github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. - -package socket - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - return e -} - -var ( - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - - procbind = modws2_32.NewProc("bind") - procgetpeername = modws2_32.NewProc("getpeername") - procgetsockname = modws2_32.NewProc("getsockname") -) - -func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socketError { - err = errnoErr(e1) - } - return -} - -func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) - if r1 == socketError { - err = errnoErr(e1) - } - return -} - -func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) - if r1 == socketError { - err = errnoErr(e1) - } - return -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go deleted file mode 100644 index 42ebc019f..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go +++ /dev/null @@ -1,132 +0,0 @@ -package stringbuffer - -import ( - "sync" - "unicode/utf16" -) - -// TODO: worth exporting and using in mkwinsyscall? - -// Uint16BufferSize is the buffer size in the pool, chosen somewhat arbitrarily to accommodate -// large path strings: -// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310. -const MinWStringCap = 310 - -// use *[]uint16 since []uint16 creates an extra allocation where the slice header -// is copied to heap and then referenced via pointer in the interface header that sync.Pool -// stores. -var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly - New: func() interface{} { - b := make([]uint16, MinWStringCap) - return &b - }, -} - -func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) } - -// freeBuffer copies the slice header data, and puts a pointer to that in the pool. -// This avoids taking a pointer to the slice header in WString, which can be set to nil. -func freeBuffer(b []uint16) { pathPool.Put(&b) } - -// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings -// for interacting with Win32 APIs. -// Sizes are specified as uint32 and not int. -// -// It is not thread safe. -type WString struct { - // type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future. - - // raw buffer - b []uint16 -} - -// NewWString returns a [WString] allocated from a shared pool with an -// initial capacity of at least [MinWStringCap]. 
-// Since the buffer may have been previously used, its contents are not guaranteed to be empty. -// -// The buffer should be freed via [WString.Free] -func NewWString() *WString { - return &WString{ - b: newBuffer(), - } -} - -func (b *WString) Free() { - if b.empty() { - return - } - freeBuffer(b.b) - b.b = nil -} - -// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the -// previous buffer back into pool. -func (b *WString) ResizeTo(c uint32) uint32 { - // already sufficient (or n is 0) - if c <= b.Cap() { - return b.Cap() - } - - if c <= MinWStringCap { - c = MinWStringCap - } - // allocate at-least double buffer size, as is done in [bytes.Buffer] and other places - if c <= 2*b.Cap() { - c = 2 * b.Cap() - } - - b2 := make([]uint16, c) - if !b.empty() { - copy(b2, b.b) - freeBuffer(b.b) - } - b.b = b2 - return c -} - -// Buffer returns the underlying []uint16 buffer. -func (b *WString) Buffer() []uint16 { - if b.empty() { - return nil - } - return b.b -} - -// Pointer returns a pointer to the first uint16 in the buffer. -// If the [WString.Free] has already been called, the pointer will be nil. -func (b *WString) Pointer() *uint16 { - if b.empty() { - return nil - } - return &b.b[0] -} - -// String returns the returns the UTF-8 encoding of the UTF-16 string in the buffer. -// -// It assumes that the data is null-terminated. -func (b *WString) String() string { - // Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows" - // and would make this code Windows-only, which makes no sense. - // So copy UTF16ToString code into here. - // If other windows-specific code is added, switch to [windows.UTF16ToString] - - s := b.b - for i, v := range s { - if v == 0 { - s = s[:i] - break - } - } - return string(utf16.Decode(s)) -} - -// Cap returns the underlying buffer capacity. 
-func (b *WString) Cap() uint32 { - if b.empty() { - return 0 - } - return b.cap() -} - -func (b *WString) cap() uint32 { return uint32(cap(b.b)) } -func (b *WString) empty() bool { return b == nil || b.cap() == 0 } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go deleted file mode 100644 index a2da6639d..000000000 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ /dev/null @@ -1,586 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "os" - "runtime" - "time" - "unsafe" - - "golang.org/x/sys/windows" - - "github.com/Microsoft/go-winio/internal/fs" -) - -//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW -//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe -//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile -//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U -//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl - -type PipeConn interface { - net.Conn - Disconnect() error - Flush() error -} - -// type aliases for mkwinsyscall code -type ( - ntAccessMask = fs.AccessMask - ntFileShareMode = fs.FileShareMode - ntFileCreationDisposition = fs.NTFileCreationDisposition - ntFileOptions = fs.NTCreateOptions -) - -type ioStatusBlock struct { - Status, Information uintptr -} - -// typedef struct _OBJECT_ATTRIBUTES { -// ULONG Length; -// HANDLE RootDirectory; -// PUNICODE_STRING ObjectName; -// ULONG Attributes; -// PVOID SecurityDescriptor; -// PVOID SecurityQualityOfService; -// } OBJECT_ATTRIBUTES; -// -// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes -type objectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName *unicodeString - Attributes uintptr - SecurityDescriptor *securityDescriptor - SecurityQoS uintptr -} - -type unicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer uintptr -} - -// typedef struct _SECURITY_DESCRIPTOR { -// BYTE Revision; -// BYTE Sbz1; -// SECURITY_DESCRIPTOR_CONTROL Control; -// PSID Owner; -// PSID Group; -// PACL Sacl; -// PACL Dacl; -// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR; -// -// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor -type securityDescriptor struct { - 
Revision byte - Sbz1 byte - Control uint16 - Owner uintptr - Group uintptr - Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl - Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl -} - -type ntStatus int32 - -func (status ntStatus) Err() error { - if status >= 0 { - return nil - } - return rtlNtStatusToDosError(status) -} - -var ( - // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. - ErrPipeListenerClosed = net.ErrClosed - - errPipeWriteClosed = errors.New("pipe has been closed for write") -) - -type win32Pipe struct { - *win32File - path string -} - -var _ PipeConn = (*win32Pipe)(nil) - -type win32MessageBytePipe struct { - win32Pipe - writeClosed bool - readEOF bool -} - -type pipeAddress string - -func (f *win32Pipe) LocalAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) RemoteAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) SetDeadline(t time.Time) error { - if err := f.SetReadDeadline(t); err != nil { - return err - } - return f.SetWriteDeadline(t) -} - -func (f *win32Pipe) Disconnect() error { - return disconnectNamedPipe(f.win32File.handle) -} - -// CloseWrite closes the write side of a message pipe in byte mode. -func (f *win32MessageBytePipe) CloseWrite() error { - if f.writeClosed { - return errPipeWriteClosed - } - err := f.win32File.Flush() - if err != nil { - return err - } - _, err = f.win32File.Write(nil) - if err != nil { - return err - } - f.writeClosed = true - return nil -} - -// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since -// they are used to implement CloseWrite(). -func (f *win32MessageBytePipe) Write(b []byte) (int, error) { - if f.writeClosed { - return 0, errPipeWriteClosed - } - if len(b) == 0 { - return 0, nil - } - return f.win32File.Write(b) -} - -// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message -// mode pipe will return io.EOF, as will all subsequent reads. -func (f *win32MessageBytePipe) Read(b []byte) (int, error) { - if f.readEOF { - return 0, io.EOF - } - n, err := f.win32File.Read(b) - if err == io.EOF { //nolint:errorlint - // If this was the result of a zero-byte read, then - // it is possible that the read was due to a zero-size - // message. Since we are simulating CloseWrite with a - // zero-byte message, ensure that all future Read() calls - // also return EOF. - f.readEOF = true - } else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno - // ERROR_MORE_DATA indicates that the pipe's read mode is message mode - // and the message still has more bytes. Treat this as a success, since - // this package presents all named pipes as byte streams. - err = nil - } - return n, err -} - -func (pipeAddress) Network() string { - return "pipe" -} - -func (s pipeAddress) String() string { - return string(s) -} - -// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. 
-func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) { - for { - select { - case <-ctx.Done(): - return windows.Handle(0), ctx.Err() - default: - h, err := fs.CreateFile(*path, - access, - 0, // mode - nil, // security attributes - fs.OPEN_EXISTING, - fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel), - 0, // template file handle - ) - if err == nil { - return h, nil - } - if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno - return h, &os.PathError{Err: err, Op: "open", Path: *path} - } - // Wait 10 msec and try again. This is a rather simplistic - // view, as we always try each 10 milliseconds. - time.Sleep(10 * time.Millisecond) - } - } -} - -// DialPipe connects to a named pipe by path, timing out if the connection -// takes longer than the specified duration. If timeout is nil, then we use -// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) -func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { - var absTimeout time.Time - if timeout != nil { - absTimeout = time.Now().Add(*timeout) - } else { - absTimeout = time.Now().Add(2 * time.Second) - } - ctx, cancel := context.WithDeadline(context.Background(), absTimeout) - defer cancel() - conn, err := DialPipeContext(ctx, path) - if errors.Is(err, context.DeadlineExceeded) { - return nil, ErrTimeout - } - return conn, err -} - -// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` -// cancellation or timeout. -func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { - return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE)) -} - -// PipeImpLevel is an enumeration of impersonation levels that may be set -// when calling DialPipeAccessImpersonation. -type PipeImpLevel uint32 - -const ( - PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS) - PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION) - PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION) - PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION) -) - -// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` -// cancellation or timeout. -func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { - return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous) -} - -// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with -// `access` at `impLevel` until `ctx` cancellation or timeout. The other -// DialPipe* implementations use PipeImpLevelAnonymous. -func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) { - var err error - var h windows.Handle - h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel) - if err != nil { - return nil, err - } - - var flags uint32 - err = getNamedPipeInfo(h, &flags, nil, nil, nil) - if err != nil { - return nil, err - } - - f, err := makeWin32File(h) - if err != nil { - windows.Close(h) - return nil, err - } - - // If the pipe is in message mode, return a message byte pipe, which - // supports CloseWrite(). 
- if flags&windows.PIPE_TYPE_MESSAGE != 0 { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: f, path: path}, - }, nil - } - return &win32Pipe{win32File: f, path: path}, nil -} - -type acceptResponse struct { - f *win32File - err error -} - -type win32PipeListener struct { - firstHandle windows.Handle - path string - config PipeConfig - acceptCh chan (chan acceptResponse) - closeCh chan int - doneCh chan int -} - -func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) { - path16, err := windows.UTF16FromString(path) - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - var oa objectAttributes - oa.Length = unsafe.Sizeof(oa) - - var ntPath unicodeString - if err := rtlDosPathNameToNtPathName(&path16[0], - &ntPath, - 0, - 0, - ).Err(); err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck - oa.ObjectName = &ntPath - oa.Attributes = windows.OBJ_CASE_INSENSITIVE - - // The security descriptor is only needed for the first pipe. - if first { - if sd != nil { - //todo: does `sdb` need to be allocated on the heap, or can go allocate it? - l := uint32(len(sd)) - sdb, err := windows.LocalAlloc(0, l) - if err != nil { - return 0, fmt.Errorf("LocalAlloc for security descriptor with of length %d: %w", l, err) - } - defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck - copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) - oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) - } else { - // Construct the default named pipe security descriptor. - var dacl uintptr - if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { - return 0, fmt.Errorf("getting default named pipe ACL: %w", err) - } - defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck - - sdb := &securityDescriptor{ - Revision: 1, - Control: windows.SE_DACL_PRESENT, - Dacl: dacl, - } - oa.SecurityDescriptor = sdb - } - } - - typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS) - if c.MessageMode { - typ |= windows.FILE_PIPE_MESSAGE_TYPE - } - - disposition := fs.FILE_OPEN - access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE - if first { - disposition = fs.FILE_CREATE - // By not asking for read or write access, the named pipe file system - // will put this pipe into an initially disconnected state, blocking - // client connections until the next call with first == false. - access = fs.SYNCHRONIZE - } - - timeout := int64(-50 * 10000) // 50ms - - var ( - h windows.Handle - iosb ioStatusBlock - ) - err = ntCreateNamedPipeFile(&h, - access, - &oa, - &iosb, - fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE, - disposition, - 0, - typ, - 0, - 0, - 0xffffffff, - uint32(c.InputBufferSize), - uint32(c.OutputBufferSize), - &timeout).Err() - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - runtime.KeepAlive(ntPath) - return h, nil -} - -func (l *win32PipeListener) makeServerPipe() (*win32File, error) { - h, err := makeServerPipeHandle(l.path, nil, &l.config, false) - if err != nil { - return nil, err - } - f, err := makeWin32File(h) - if err != nil { - windows.Close(h) - return nil, err - } - return f, nil -} - -func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { - p, err := l.makeServerPipe() - if err != nil { - return nil, err - } - - // Wait for the client to connect. 
- ch := make(chan error) - go func(p *win32File) { - ch <- connectPipe(p) - }(p) - - select { - case err = <-ch: - if err != nil { - p.Close() - p = nil - } - case <-l.closeCh: - // Abort the connect request by closing the handle. - p.Close() - p = nil - err = <-ch - if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno - err = ErrPipeListenerClosed - } - } - return p, err -} - -func (l *win32PipeListener) listenerRoutine() { - closed := false - for !closed { - select { - case <-l.closeCh: - closed = true - case responseCh := <-l.acceptCh: - var ( - p *win32File - err error - ) - for { - p, err = l.makeConnectedServerPipe() - // If the connection was immediately closed by the client, try - // again. - if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno - break - } - } - responseCh <- acceptResponse{p, err} - closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno - } - } - windows.Close(l.firstHandle) - l.firstHandle = 0 - // Notify Close() and Accept() callers that the handle has been closed. - close(l.doneCh) -} - -// PipeConfig contain configuration for the pipe listener. -type PipeConfig struct { - // SecurityDescriptor contains a Windows security descriptor in SDDL format. - SecurityDescriptor string - - // MessageMode determines whether the pipe is in byte or message mode. In either - // case the pipe is read in byte mode by default. The only practical difference in - // this implementation is that CloseWrite() is only supported for message mode pipes; - // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only - // transferred to the reader (and returned as io.EOF in this implementation) - // when the pipe is in message mode. - MessageMode bool - - // InputBufferSize specifies the size of the input buffer, in bytes. - InputBufferSize int32 - - // OutputBufferSize specifies the size of the output buffer, in bytes. - OutputBufferSize int32 -} - -// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. -// The pipe must not already exist. 
-func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { - var ( - sd []byte - err error - ) - if c == nil { - c = &PipeConfig{} - } - if c.SecurityDescriptor != "" { - sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) - if err != nil { - return nil, err - } - } - h, err := makeServerPipeHandle(path, sd, c, true) - if err != nil { - return nil, err - } - l := &win32PipeListener{ - firstHandle: h, - path: path, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), - } - go l.listenerRoutine() - return l, nil -} - -func connectPipe(p *win32File) error { - c, err := p.prepareIO() - if err != nil { - return err - } - defer p.wg.Done() - - err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIO(c, nil, 0, err) - if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno - return err - } - return nil -} - -func (l *win32PipeListener) Accept() (net.Conn, error) { - ch := make(chan acceptResponse) - select { - case l.acceptCh <- ch: - response := <-ch - err := response.err - if err != nil { - return nil, err - } - if l.config.MessageMode { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: response.f, path: l.path}, - }, nil - } - return &win32Pipe{win32File: response.f, path: l.path}, nil - case <-l.doneCh: - return nil, ErrPipeListenerClosed - } -} - -func (l *win32PipeListener) Close() error { - select { - case l.closeCh <- 1: - <-l.doneCh - case <-l.doneCh: - } - return nil -} - -func (l *win32PipeListener) Addr() net.Addr { - return pipeAddress(l.path) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go deleted file mode 100644 index 48ce4e924..000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ /dev/null @@ -1,232 +0,0 @@ -// Package guid provides a GUID type. The backing structure for a GUID is -// identical to that used by the golang.org/x/sys/windows GUID type. -// There are two main binary encodings used for a GUID, the big-endian encoding, -// and the Windows (mixed-endian) encoding. See here for details: -// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding -package guid - -import ( - "crypto/rand" - "crypto/sha1" //nolint:gosec // not used for secure application - "encoding" - "encoding/binary" - "fmt" - "strconv" -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment - -// Variant specifies which GUID variant (or "type") of the GUID. It determines -// how the entirety of the rest of the GUID is interpreted. -type Variant uint8 - -// The variants specified by RFC 4122 section 4.1.1. -const ( - // VariantUnknown specifies a GUID variant which does not conform to one of - // the variant encodings specified in RFC 4122. - VariantUnknown Variant = iota - VariantNCS - VariantRFC4122 // RFC 4122 - VariantMicrosoft - VariantFuture -) - -// Version specifies how the bits in the GUID were generated. For instance, a -// version 4 GUID is randomly generated, and a version 5 is generated from the -// hash of an input string. -type Version uint8 - -func (v Version) String() string { - return strconv.FormatUint(uint64(v), 10) -} - -var _ = (encoding.TextMarshaler)(GUID{}) -var _ = (encoding.TextUnmarshaler)(&GUID{}) - -// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. 
-func NewV4() (GUID, error) { - var b [16]byte - if _, err := rand.Read(b[:]); err != nil { - return GUID{}, err - } - - g := FromArray(b) - g.setVersion(4) // Version 4 means randomly generated. - g.setVariant(VariantRFC4122) - - return g, nil -} - -// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) -// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, -// and the sample code treats it as a series of bytes, so we do the same here. -// -// Some implementations, such as those found on Windows, treat the name as a -// big-endian UTF16 stream of bytes. If that is desired, the string can be -// encoded as such before being passed to this function. -func NewV5(namespace GUID, name []byte) (GUID, error) { - b := sha1.New() //nolint:gosec // not used for secure application - namespaceBytes := namespace.ToArray() - b.Write(namespaceBytes[:]) - b.Write(name) - - a := [16]byte{} - copy(a[:], b.Sum(nil)) - - g := FromArray(a) - g.setVersion(5) // Version 5 means generated from a string. - g.setVariant(VariantRFC4122) - - return g, nil -} - -func fromArray(b [16]byte, order binary.ByteOrder) GUID { - var g GUID - g.Data1 = order.Uint32(b[0:4]) - g.Data2 = order.Uint16(b[4:6]) - g.Data3 = order.Uint16(b[6:8]) - copy(g.Data4[:], b[8:16]) - return g -} - -func (g GUID) toArray(order binary.ByteOrder) [16]byte { - b := [16]byte{} - order.PutUint32(b[0:4], g.Data1) - order.PutUint16(b[4:6], g.Data2) - order.PutUint16(b[6:8], g.Data3) - copy(b[8:16], g.Data4[:]) - return b -} - -// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. -func FromArray(b [16]byte) GUID { - return fromArray(b, binary.BigEndian) -} - -// ToArray returns an array of 16 bytes representing the GUID in big-endian -// encoding. -func (g GUID) ToArray() [16]byte { - return g.toArray(binary.BigEndian) -} - -// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. -func FromWindowsArray(b [16]byte) GUID { - return fromArray(b, binary.LittleEndian) -} - -// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows -// encoding. -func (g GUID) ToWindowsArray() [16]byte { - return g.toArray(binary.LittleEndian) -} - -func (g GUID) String() string { - return fmt.Sprintf( - "%08x-%04x-%04x-%04x-%012x", - g.Data1, - g.Data2, - g.Data3, - g.Data4[:2], - g.Data4[2:]) -} - -// FromString parses a string containing a GUID and returns the GUID. The only -// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -// format. 
-func FromString(s string) (GUID, error) { - if len(s) != 36 { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - - var g GUID - - data1, err := strconv.ParseUint(s[0:8], 16, 32) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data1 = uint32(data1) - - data2, err := strconv.ParseUint(s[9:13], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data2 = uint16(data2) - - data3, err := strconv.ParseUint(s[14:18], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data3 = uint16(data3) - - for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { - v, err := strconv.ParseUint(s[x:x+2], 16, 8) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data4[i] = uint8(v) - } - - return g, nil -} - -func (g *GUID) setVariant(v Variant) { - d := g.Data4[0] - switch v { - case VariantNCS: - d = (d & 0x7f) - case VariantRFC4122: - d = (d & 0x3f) | 0x80 - case VariantMicrosoft: - d = (d & 0x1f) | 0xc0 - case VariantFuture: - d = (d & 0x0f) | 0xe0 - case VariantUnknown: - fallthrough - default: - panic(fmt.Sprintf("invalid variant: %d", v)) - } - g.Data4[0] = d -} - -// Variant returns the GUID variant, as defined in RFC 4122. -func (g GUID) Variant() Variant { - b := g.Data4[0] - if b&0x80 == 0 { - return VariantNCS - } else if b&0xc0 == 0x80 { - return VariantRFC4122 - } else if b&0xe0 == 0xc0 { - return VariantMicrosoft - } else if b&0xe0 == 0xe0 { - return VariantFuture - } - return VariantUnknown -} - -func (g *GUID) setVersion(v Version) { - g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) -} - -// Version returns the GUID version, as defined in RFC 4122. -func (g GUID) Version() Version { - return Version((g.Data3 & 0xF000) >> 12) -} - -// MarshalText returns the textual representation of the GUID. -func (g GUID) MarshalText() ([]byte, error) { - return []byte(g.String()), nil -} - -// UnmarshalText takes the textual representation of a GUID, and unmarhals it -// into this GUID. -func (g *GUID) UnmarshalText(text []byte) error { - g2, err := FromString(string(text)) - if err != nil { - return err - } - *g = g2 - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go deleted file mode 100644 index 805bd3548..000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !windows -// +build !windows - -package guid - -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type as that is only available to builds -// targeted at `windows`. The representation matches that used by native Windows -// code. -type GUID struct { - Data1 uint32 - Data2 uint16 - Data3 uint16 - Data4 [8]byte -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go deleted file mode 100644 index 27e45ee5c..000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build windows -// +build windows - -package guid - -import "golang.org/x/sys/windows" - -// GUID represents a GUID/UUID. 
It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go deleted file mode 100644 index 4076d3132..000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT. - -package guid - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[VariantUnknown-0] - _ = x[VariantNCS-1] - _ = x[VariantRFC4122-2] - _ = x[VariantMicrosoft-3] - _ = x[VariantFuture-4] -} - -const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture" - -var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33} - -func (i Variant) String() string { - if i >= Variant(len(_Variant_index)-1) { - return "Variant(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Variant_name[_Variant_index[i]:_Variant_index[i+1]] -} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go deleted file mode 100644 index d9b90b6e8..000000000 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ /dev/null @@ -1,196 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "runtime" - "sync" - "unicode/utf16" - - "golang.org/x/sys/windows" -) - -//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges -//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf -//sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h windows.Handle) = GetCurrentThread -//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW -//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW -//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW - -const ( - //revive:disable-next-line:var-naming ALL_CAPS - SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED - - //revive:disable-next-line:var-naming ALL_CAPS - ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED - - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" - SeSecurityPrivilege = "SeSecurityPrivilege" -) - -var ( - privNames = make(map[string]uint64) - privNameMutex sync.Mutex -) - -// PrivilegeError represents an error enabling privileges. 
-type PrivilegeError struct { - privileges []uint64 -} - -func (e *PrivilegeError) Error() string { - s := "Could not enable privilege " - if len(e.privileges) > 1 { - s = "Could not enable privileges " - } - for i, p := range e.privileges { - if i != 0 { - s += ", " - } - s += `"` - s += getPrivilegeName(p) - s += `"` - } - return s -} - -// RunWithPrivilege enables a single privilege for a function call. -func RunWithPrivilege(name string, fn func() error) error { - return RunWithPrivileges([]string{name}, fn) -} - -// RunWithPrivileges enables privileges for a function call. -func RunWithPrivileges(names []string, fn func() error) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - runtime.LockOSThread() - defer runtime.UnlockOSThread() - token, err := newThreadToken() - if err != nil { - return err - } - defer releaseThreadToken(token) - err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) - if err != nil { - return err - } - return fn() -} - -func mapPrivileges(names []string) ([]uint64, error) { - privileges := make([]uint64, 0, len(names)) - privNameMutex.Lock() - defer privNameMutex.Unlock() - for _, name := range names { - p, ok := privNames[name] - if !ok { - err := lookupPrivilegeValue("", name, &p) - if err != nil { - return nil, err - } - privNames[name] = p - } - privileges = append(privileges, p) - } - return privileges, nil -} - -// EnableProcessPrivileges enables privileges globally for the process. -func EnableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) -} - -// DisableProcessPrivileges disables privileges globally for the process. -func DisableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, 0) -} - -func enableDisableProcessPrivilege(names []string, action uint32) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - - p := windows.CurrentProcess() - var token windows.Token - err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) - if err != nil { - return err - } - - defer token.Close() - return adjustPrivileges(token, privileges, action) -} - -func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { - var b bytes.Buffer - _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) - for _, p := range privileges { - _ = binary.Write(&b, binary.LittleEndian, p) - _ = binary.Write(&b, binary.LittleEndian, action) - } - prevState := make([]byte, b.Len()) - reqSize := uint32(0) - success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) - if !success { - return err - } - if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno - return &PrivilegeError{privileges} - } - return nil -} - -func getPrivilegeName(luid uint64) string { - var nameBuffer [256]uint16 - bufSize := uint32(len(nameBuffer)) - err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) - if err != nil { - return fmt.Sprintf("", luid) - } - - var displayNameBuffer [256]uint16 - displayBufSize := uint32(len(displayNameBuffer)) - var langID uint32 - err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) - if err != nil { - return fmt.Sprintf("", string(utf16.Decode(nameBuffer[:bufSize]))) - } - - return string(utf16.Decode(displayNameBuffer[:displayBufSize])) -} - -func newThreadToken() (windows.Token, error) { - err := 
impersonateSelf(windows.SecurityImpersonation) - if err != nil { - return 0, err - } - - var token windows.Token - err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token) - if err != nil { - rerr := revertToSelf() - if rerr != nil { - panic(rerr) - } - return 0, err - } - return token, nil -} - -func releaseThreadToken(h windows.Token) { - err := revertToSelf() - if err != nil { - panic(err) - } - h.Close() -} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go deleted file mode 100644 index 67d1a104a..000000000 --- a/vendor/github.com/Microsoft/go-winio/reparse.go +++ /dev/null @@ -1,131 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "strings" - "unicode/utf16" - "unsafe" -) - -const ( - reparseTagMountPoint = 0xA0000003 - reparseTagSymlink = 0xA000000C -) - -type reparseDataBuffer struct { - ReparseTag uint32 - ReparseDataLength uint16 - Reserved uint16 - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 -} - -// ReparsePoint describes a Win32 symlink or mount point. -type ReparsePoint struct { - Target string - IsMountPoint bool -} - -// UnsupportedReparsePointError is returned when trying to decode a non-symlink or -// mount point reparse point. -type UnsupportedReparsePointError struct { - Tag uint32 -} - -func (e *UnsupportedReparsePointError) Error() string { - return fmt.Sprintf("unsupported reparse point %x", e.Tag) -} - -// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink -// or a mount point. -func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { - tag := binary.LittleEndian.Uint32(b[0:4]) - return DecodeReparsePointData(tag, b[8:]) -} - -func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { - isMountPoint := false - switch tag { - case reparseTagMountPoint: - isMountPoint = true - case reparseTagSymlink: - default: - return nil, &UnsupportedReparsePointError{tag} - } - nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) - if !isMountPoint { - nameOffset += 4 - } - nameLength := binary.LittleEndian.Uint16(b[6:8]) - name := make([]uint16, nameLength/2) - err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) - if err != nil { - return nil, err - } - return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil -} - -func isDriveLetter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or -// mount point. -func EncodeReparsePoint(rp *ReparsePoint) []byte { - // Generate an NT path and determine if this is a relative path. - var ntTarget string - relative := false - if strings.HasPrefix(rp.Target, `\\?\`) { - ntTarget = `\??\` + rp.Target[4:] - } else if strings.HasPrefix(rp.Target, `\\`) { - ntTarget = `\??\UNC\` + rp.Target[2:] - } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { - ntTarget = `\??\` + rp.Target - } else { - ntTarget = rp.Target - relative = true - } - - // The paths must be NUL-terminated even though they are counted strings. 
- target16 := utf16.Encode([]rune(rp.Target + "\x00")) - ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) - - size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 - size += len(ntTarget16)*2 + len(target16)*2 - - tag := uint32(reparseTagMountPoint) - if !rp.IsMountPoint { - tag = reparseTagSymlink - size += 4 // Add room for symlink flags - } - - data := reparseDataBuffer{ - ReparseTag: tag, - ReparseDataLength: uint16(size), - SubstituteNameOffset: 0, - SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), - PrintNameOffset: uint16(len(ntTarget16) * 2), - PrintNameLength: uint16((len(target16) - 1) * 2), - } - - var b bytes.Buffer - _ = binary.Write(&b, binary.LittleEndian, &data) - if !rp.IsMountPoint { - flags := uint32(0) - if relative { - flags |= 1 - } - _ = binary.Write(&b, binary.LittleEndian, flags) - } - - _ = binary.Write(&b, binary.LittleEndian, ntTarget16) - _ = binary.Write(&b, binary.LittleEndian, target16) - return b.Bytes() -} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go deleted file mode 100644 index c3685e98e..000000000 --- a/vendor/github.com/Microsoft/go-winio/sd.go +++ /dev/null @@ -1,133 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "errors" - "fmt" - "unsafe" - - "golang.org/x/sys/windows" -) - -//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW -//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW -//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW -//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW - -type AccountLookupError struct { - Name string - Err error -} - -func (e *AccountLookupError) Error() string { - if e.Name == "" { - return "lookup account: empty account name specified" - } - var s string - switch { - case errors.Is(e.Err, windows.ERROR_INVALID_SID): - s = "the security ID structure is invalid" - case errors.Is(e.Err, windows.ERROR_NONE_MAPPED): - s = "not found" - default: - s = e.Err.Error() - } - return "lookup account " + e.Name + ": " + s -} - -func (e *AccountLookupError) Unwrap() error { return e.Err } - -type SddlConversionError struct { - Sddl string - Err error -} - -func (e *SddlConversionError) Error() string { - return "convert " + e.Sddl + ": " + e.Err.Error() -} - -func (e *SddlConversionError) Unwrap() error { return e.Err } - -// LookupSidByName looks up the SID of an account by name -// -//revive:disable-next-line:var-naming SID, not Sid -func LookupSidByName(name string) (sid string, err error) { - if name == "" { - return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED} - } - - var sidSize, sidNameUse, refDomainSize uint32 - err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno - return "", &AccountLookupError{name, err} - } - sidBuffer := make([]byte, sidSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{name, err} - } - var strBuffer *uint16 - err = 
convertSidToStringSid(&sidBuffer[0], &strBuffer) - if err != nil { - return "", &AccountLookupError{name, err} - } - sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) - _, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer))) - return sid, nil -} - -// LookupNameBySid looks up the name of an account by SID -// -//revive:disable-next-line:var-naming SID, not Sid -func LookupNameBySid(sid string) (name string, err error) { - if sid == "" { - return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED} - } - - sidBuffer, err := windows.UTF16PtrFromString(sid) - if err != nil { - return "", &AccountLookupError{sid, err} - } - - var sidPtr *byte - if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { - return "", &AccountLookupError{sid, err} - } - defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck - - var nameSize, refDomainSize, sidNameUse uint32 - err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno - return "", &AccountLookupError{sid, err} - } - - nameBuffer := make([]uint16, nameSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{sid, err} - } - - name = windows.UTF16ToString(nameBuffer) - return name, nil -} - -func SddlToSecurityDescriptor(sddl string) ([]byte, error) { - sd, err := windows.SecurityDescriptorFromString(sddl) - if err != nil { - return nil, &SddlConversionError{Sddl: sddl, Err: err} - } - b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) - return b, nil -} - -func SecurityDescriptorToSddl(sd []byte) (string, error) { - if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { - return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) - } - s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) - return s.String(), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go deleted file mode 100644 index a6ca111b3..000000000 --- a/vendor/github.com/Microsoft/go-winio/syscall.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build windows - -package winio - -//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go deleted file mode 100644 index 89b66eda8..000000000 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ /dev/null @@ -1,378 +0,0 @@ -//go:build windows - -// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. - -package winio - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. 
-func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") -) - -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } - r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - err = errnoErr(e1) - } - return -} - -func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSidToSid(str *uint16, sid **byte) (err error) { - r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level)) - if r1 == 0 { - err = errnoErr(e1) - 
} - return -} - -func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(accountName) - if err != nil { - return - } - return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) -} - -func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) -} - -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) -} - -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) -} - -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) 
(err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } - r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func revertToSelf() (err error) { - r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) { - r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) { - r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) { - r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount)) - newport = windows.Handle(r0) - if newport == 0 { - err = errnoErr(e1) - } - return -} - -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) -} - -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { - r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) - handle = windows.Handle(r0) - if handle == windows.InvalidHandle { - err = errnoErr(e1) - } - return -} - -func disconnectNamedPipe(pipe windows.Handle) (err error) { - r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getCurrentThread() (h windows.Handle) { - r0, _, 
_ := syscall.SyscallN(procGetCurrentThread.Addr()) - h = windows.Handle(r0) - return -} - -func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { - r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) - status = ntStatus(r0) - return -} - -func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { - r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl))) - status = ntStatus(r0) - return -} - -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { - r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved)) - status = ntStatus(r0) - return -} - -func rtlNtStatusToDosError(status ntStatus) (winerr error) { - r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status)) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} - -func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } - r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) - if r1 == 0 { - err = 
errnoErr(e1) - } - return -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go new file mode 100644 index 000000000..8d2f05500 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go @@ -0,0 +1,46 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil/promlint" +) + +// CollectAndLint registers the provided Collector with a newly created pedantic +// Registry. It then calls GatherAndLint with that Registry and with the +// provided metricNames. +func CollectAndLint(c prometheus.Collector, metricNames ...string) ([]promlint.Problem, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %w", err) + } + return GatherAndLint(reg, metricNames...) +} + +// GatherAndLint gathers all metrics from the provided Gatherer and checks them +// with the linter in the promlint package. If any metricNames are provided, +// only metrics with those names are checked. +func GatherAndLint(g prometheus.Gatherer, metricNames ...string) ([]promlint.Problem, error) { + got, err := g.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %w", err) + } + if metricNames != nil { + got = filterMetrics(got, metricNames) + } + return promlint.NewWithMetricFamilies(got).Lint() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go new file mode 100644 index 000000000..9ba42826a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go @@ -0,0 +1,33 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promlint + +import dto "github.com/prometheus/client_model/go" + +// A Problem is an issue detected by a linter. +type Problem struct { + // The name of the metric indicated by this Problem. + Metric string + + // A description of the issue for this Problem. + Text string +} + +// newProblem is helper function to create a Problem. 
+func newProblem(mf *dto.MetricFamily, text string) Problem { + return Problem{ + Metric: mf.GetName(), + Text: text, + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go new file mode 100644 index 000000000..ea46f38ec --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go @@ -0,0 +1,123 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promlint provides a linter for Prometheus metrics. +package promlint + +import ( + "errors" + "io" + "sort" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +// A Linter is a Prometheus metrics linter. It identifies issues with metric +// names, types, and metadata, and reports them to the caller. +type Linter struct { + // The linter will read metrics in the Prometheus text format from r and + // then lint it, _and_ it will lint the metrics provided directly as + // MetricFamily proto messages in mfs. Note, however, that the current + // constructor functions New and NewWithMetricFamilies only ever set one + // of them. + r io.Reader + mfs []*dto.MetricFamily + + customValidations []Validation +} + +// New creates a new Linter that reads an input stream of Prometheus metrics in +// the Prometheus text exposition format. +func New(r io.Reader) *Linter { + return &Linter{ + r: r, + } +} + +// NewWithMetricFamilies creates a new Linter that reads from a slice of +// MetricFamily protobuf messages. +func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter { + return &Linter{ + mfs: mfs, + } +} + +// AddCustomValidations adds custom validations to the linter. +func (l *Linter) AddCustomValidations(vs ...Validation) { + if l.customValidations == nil { + l.customValidations = make([]Validation, 0, len(vs)) + } + l.customValidations = append(l.customValidations, vs...) +} + +// Lint performs a linting pass, returning a slice of Problems indicating any +// issues found in the metrics stream. The slice is sorted by metric name +// and issue description. +func (l *Linter) Lint() ([]Problem, error) { + var problems []Problem + + if l.r != nil { + d := expfmt.NewDecoder(l.r, expfmt.NewFormat(expfmt.TypeTextPlain)) + + mf := &dto.MetricFamily{} + for { + if err := d.Decode(mf); err != nil { + if errors.Is(err, io.EOF) { + break + } + + return nil, err + } + + problems = append(problems, l.lint(mf)...) + } + } + for _, mf := range l.mfs { + problems = append(problems, l.lint(mf)...) + } + + // Ensure deterministic output. + sort.SliceStable(problems, func(i, j int) bool { + if problems[i].Metric == problems[j].Metric { + return problems[i].Text < problems[j].Text + } + return problems[i].Metric < problems[j].Metric + }) + + return problems, nil +} + +// lint is the entry point for linting a single metric. 
+func (l *Linter) lint(mf *dto.MetricFamily) []Problem { + var problems []Problem + + for _, fn := range defaultValidations { + errs := fn(mf) + for _, err := range errs { + problems = append(problems, newProblem(mf, err.Error())) + } + } + + if l.customValidations != nil { + for _, fn := range l.customValidations { + errs := fn(mf) + for _, err := range errs { + problems = append(problems, newProblem(mf, err.Error())) + } + } + } + + // TODO(mdlayher): lint rules for specific metrics types. + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go new file mode 100644 index 000000000..e1441598d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -0,0 +1,34 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promlint + +import ( + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/testutil/promlint/validations" +) + +type Validation = func(mf *dto.MetricFamily) []error + +var defaultValidations = []Validation{ + validations.LintHelp, + validations.LintMetricUnits, + validations.LintCounter, + validations.LintHistogramSummaryReserved, + validations.LintMetricTypeInName, + validations.LintReservedChars, + validations.LintCamelCase, + validations.LintUnitAbbreviations, + validations.LintDuplicateMetric, +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go new file mode 100644 index 000000000..f2c2c3905 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go @@ -0,0 +1,40 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +// LintCounter detects issues specific to counters, as well as patterns that should +// only be used with counters. 
+func LintCounter(mf *dto.MetricFamily) []error { + var problems []error + + isCounter := mf.GetType() == dto.MetricType_COUNTER + isUntyped := mf.GetType() == dto.MetricType_UNTYPED + hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total") + + switch { + case isCounter && !hasTotalSuffix: + problems = append(problems, errors.New(`counter metrics should have "_total" suffix`)) + case !isUntyped && !isCounter && hasTotalSuffix: + problems = append(problems, errors.New(`non-counter metrics should not have "_total" suffix`)) + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go new file mode 100644 index 000000000..fdc1e6239 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "fmt" + "reflect" + + dto "github.com/prometheus/client_model/go" +) + +// LintDuplicateMetric detects duplicate metric. +func LintDuplicateMetric(mf *dto.MetricFamily) []error { + var problems []error + + for i, m := range mf.Metric { + for _, k := range mf.Metric[i+1:] { + if reflect.DeepEqual(m.Label, k.Label) { + problems = append(problems, fmt.Errorf("metric not unique")) + break + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go new file mode 100644 index 000000000..de52cfee4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go @@ -0,0 +1,101 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + "fmt" + "regexp" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +var camelCase = regexp.MustCompile(`[a-z][A-Z]`) + +// LintMetricUnits detects issues with metric unit names. +func LintMetricUnits(mf *dto.MetricFamily) []error { + var problems []error + + unit, base, ok := metricUnits(*mf.Name) + if !ok { + // No known units detected. + return nil + } + + // Unit is already a base unit. 
+ if unit == base { + return nil + } + + problems = append(problems, fmt.Errorf("use base unit %q instead of %q", base, unit)) + + return problems +} + +// LintMetricTypeInName detects when the metric type is included in the metric name. +func LintMetricTypeInName(mf *dto.MetricFamily) []error { + if mf.GetType() == dto.MetricType_UNTYPED { + return nil + } + + var problems []error + + n := strings.ToLower(mf.GetName()) + typename := strings.ToLower(mf.GetType().String()) + + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) + } + + return problems +} + +// LintReservedChars detects colons in metric names. +func LintReservedChars(mf *dto.MetricFamily) []error { + var problems []error + if strings.Contains(mf.GetName(), ":") { + problems = append(problems, errors.New("metric names should not contain ':'")) + } + return problems +} + +// LintCamelCase detects metric names and label names written in camelCase. +func LintCamelCase(mf *dto.MetricFamily) []error { + var problems []error + if camelCase.FindString(mf.GetName()) != "" { + problems = append(problems, errors.New("metric names should be written in 'snake_case' not 'camelCase'")) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if camelCase.FindString(l.GetName()) != "" { + problems = append(problems, errors.New("label names should be written in 'snake_case' not 'camelCase'")) + } + } + } + return problems +} + +// LintUnitAbbreviations detects abbreviated units in the metric name. +func LintUnitAbbreviations(mf *dto.MetricFamily) []error { + var problems []error + n := strings.ToLower(mf.GetName()) + for _, s := range unitAbbreviations { + if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) { + problems = append(problems, errors.New("metric names should not contain abbreviated units")) + } + } + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go new file mode 100644 index 000000000..1df294468 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go @@ -0,0 +1,32 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + + dto "github.com/prometheus/client_model/go" +) + +// LintHelp detects issues related to the help text for a metric. +func LintHelp(mf *dto.MetricFamily) []error { + var problems []error + + // Expect all metrics to have help text available. 
+ if mf.Help == nil { + problems = append(problems, errors.New("no help text")) + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go new file mode 100644 index 000000000..6564bdf36 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go @@ -0,0 +1,63 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +// LintHistogramSummaryReserved detects when other types of metrics use names or labels +// reserved for use by histograms and/or summaries. +func LintHistogramSummaryReserved(mf *dto.MetricFamily) []error { + // These rules do not apply to untyped metrics. + t := mf.GetType() + if t == dto.MetricType_UNTYPED { + return nil + } + + var problems []error + + isHistogram := t == dto.MetricType_HISTOGRAM + isSummary := t == dto.MetricType_SUMMARY + + n := mf.GetName() + + if !isHistogram && strings.HasSuffix(n, "_bucket") { + problems = append(problems, errors.New(`non-histogram metrics should not have "_bucket" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") { + problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_count" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") { + problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_sum" suffix`)) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + ln := l.GetName() + + if !isHistogram && ln == "le" { + problems = append(problems, errors.New(`non-histogram metrics should not have "le" label`)) + } + if !isSummary && ln == "quantile" { + problems = append(problems, errors.New(`non-summary metrics should not have "quantile" label`)) + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go new file mode 100644 index 000000000..967977d2b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go @@ -0,0 +1,118 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import "strings" + +// Units and their possible prefixes recognized by this library. More can be +// added over time as needed. +var ( + // map a unit to the appropriate base unit. + units = map[string]string{ + // Base units. + "amperes": "amperes", + "bytes": "bytes", + "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases. + "grams": "grams", + "joules": "joules", + "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements). + "meters": "meters", // Both American and international spelling permitted. + "metres": "metres", + "seconds": "seconds", + "volts": "volts", + + // Non base units. + // Time. + "minutes": "seconds", + "hours": "seconds", + "days": "seconds", + "weeks": "seconds", + // Temperature. + "kelvins": "kelvin", + "fahrenheit": "celsius", + "rankine": "celsius", + // Length. + "inches": "meters", + "yards": "meters", + "miles": "meters", + // Bytes. + "bits": "bytes", + // Energy. + "calories": "joules", + // Mass. + "pounds": "grams", + "ounces": "grams", + } + + unitPrefixes = []string{ + "pico", + "nano", + "micro", + "milli", + "centi", + "deci", + "deca", + "hecto", + "kilo", + "kibi", + "mega", + "mibi", + "giga", + "gibi", + "tera", + "tebi", + "peta", + "pebi", + } + + // Common abbreviations that we'd like to discourage. + unitAbbreviations = []string{ + "s", + "ms", + "us", + "ns", + "sec", + "b", + "kb", + "mb", + "gb", + "tb", + "pb", + "m", + "h", + "d", + } +) + +// metricUnits attempts to detect known unit types used as part of a metric name, +// e.g. "foo_bytes_total" or "bar_baz_milligrams". +func metricUnits(m string) (unit, base string, ok bool) { + ss := strings.Split(m, "_") + + for _, s := range ss { + if base, found := units[s]; found { + return s, base, true + } + + for _, p := range unitPrefixes { + if strings.HasPrefix(s, p) { + if base, found := units[s[len(p):]]; found { + return s, base, true + } + } + } + } + + return "", "", false +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go new file mode 100644 index 000000000..6f1200180 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -0,0 +1,332 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package testutil provides helpers to test code using the prometheus package +// of client_golang. +// +// While writing unit tests to verify correct instrumentation of your code, it's +// a common mistake to mostly test the instrumentation library instead of your +// own code. 
Rather than verifying that a prometheus.Counter's value has changed +// as expected or that it shows up in the exposition after registration, it is +// in general more robust and more faithful to the concept of unit tests to use +// mock implementations of the prometheus.Counter and prometheus.Registerer +// interfaces that simply assert that the Add or Register methods have been +// called with the expected arguments. However, this might be overkill in simple +// scenarios. The ToFloat64 function is provided for simple inspection of a +// single-value metric, but it has to be used with caution. +// +// End-to-end tests to verify all or larger parts of the metrics exposition can +// be implemented with the CollectAndCompare or GatherAndCompare functions. The +// most appropriate use is not so much testing instrumentation of your code, but +// testing custom prometheus.Collector implementations and in particular whole +// exporters, i.e. programs that retrieve telemetry data from a 3rd party source +// and convert it into Prometheus metrics. +// +// In a similar pattern, CollectAndLint and GatherAndLint can be used to detect +// metrics that have issues with their name, type, or metadata without being +// necessarily invalid, e.g. a counter with a name missing the “_total” suffix. +package testutil + +import ( + "bytes" + "fmt" + "io" + "net/http" + + "github.com/kylelemons/godebug/diff" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "google.golang.org/protobuf/proto" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/internal" +) + +// ToFloat64 collects all Metrics from the provided Collector. It expects that +// this results in exactly one Metric being collected, which must be a Gauge, +// Counter, or Untyped. In all other cases, ToFloat64 panics. ToFloat64 returns +// the value of the collected Metric. +// +// The Collector provided is typically a simple instance of Gauge or Counter, or +// – less commonly – a GaugeVec or CounterVec with exactly one element. But any +// Collector fulfilling the prerequisites described above will do. +// +// Use this function with caution. It is computationally very expensive and thus +// not suited at all to read values from Metrics in regular code. This is really +// only for testing purposes, and even for testing, other approaches are often +// more appropriate (see this package's documentation). +// +// A clear anti-pattern would be to use a metric type from the prometheus +// package to track values that are also needed for something else than the +// exposition of Prometheus metrics. For example, you would like to track the +// number of items in a queue because your code should reject queuing further +// items if a certain limit is reached. It is tempting to track the number of +// items in a prometheus.Gauge, as it is then easily available as a metric for +// exposition, too. However, then you would need to call ToFloat64 in your +// regular code, potentially quite often. The recommended way is to track the +// number of items conventionally (in the way you would have done it without +// considering Prometheus metrics) and then expose the number with a +// prometheus.GaugeFunc. 
+func ToFloat64(c prometheus.Collector) float64 { + var ( + m prometheus.Metric + mCount int + mChan = make(chan prometheus.Metric) + done = make(chan struct{}) + ) + + go func() { + for m = range mChan { + mCount++ + } + close(done) + }() + + c.Collect(mChan) + close(mChan) + <-done + + if mCount != 1 { + panic(fmt.Errorf("collected %d metrics instead of exactly 1", mCount)) + } + + pb := &dto.Metric{} + if err := m.Write(pb); err != nil { + panic(fmt.Errorf("error happened while collecting metrics: %w", err)) + } + if pb.Gauge != nil { + return pb.Gauge.GetValue() + } + if pb.Counter != nil { + return pb.Counter.GetValue() + } + if pb.Untyped != nil { + return pb.Untyped.GetValue() + } + panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb)) +} + +// CollectAndCount registers the provided Collector with a newly created +// pedantic Registry. It then calls GatherAndCount with that Registry and with +// the provided metricNames. In the unlikely case that the registration or the +// gathering fails, this function panics. (This is inconsistent with the other +// CollectAnd… functions in this package and has historical reasons. Changing +// the function signature would be a breaking change and will therefore only +// happen with the next major version bump.) +func CollectAndCount(c prometheus.Collector, metricNames ...string) int { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + panic(fmt.Errorf("registering collector failed: %w", err)) + } + result, err := GatherAndCount(reg, metricNames...) + if err != nil { + panic(err) + } + return result +} + +// GatherAndCount gathers all metrics from the provided Gatherer and counts +// them. It returns the number of metric children in all gathered metric +// families together. If any metricNames are provided, only metrics with those +// names are counted. +func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { + got, err := g.Gather() + if err != nil { + return 0, fmt.Errorf("gathering metrics failed: %w", err) + } + if metricNames != nil { + got = filterMetrics(got, metricNames) + } + + result := 0 + for _, mf := range got { + result += len(mf.GetMetric()) + } + return result, nil +} + +// ScrapeAndCompare calls a remote exporter's endpoint which is expected to return some metrics in +// plain text format. Then it compares it with the results that the `expected` would return. +// If the `metricNames` is not empty it would filter the comparison only to the given metric names. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and scraped metrics. See https://github.com/prometheus/client_golang/issues/1351. +func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error { + resp, err := http.Get(url) + if err != nil { + return fmt.Errorf("scraping metrics failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("the scraping target returned a status code other than 200: %d", + resp.StatusCode) + } + + scraped, err := convertReaderToMetricFamily(resp.Body) + if err != nil { + return err + } + + wanted, err := convertReaderToMetricFamily(expected) + if err != nil { + return err + } + + return compareMetricFamilies(scraped, wanted, metricNames...) +} + +// CollectAndCompare collects the metrics identified by `metricNames` and compares them in the Prometheus text +// exposition format to the data read from expected. 
+// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and collected metrics. See https://github.com/prometheus/client_golang/issues/1351. +func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return fmt.Errorf("registering collector failed: %w", err) + } + return GatherAndCompare(reg, expected, metricNames...) +} + +// GatherAndCompare gathers all metrics from the provided Gatherer and compares +// it to an expected output read from the provided Reader in the Prometheus text +// exposition format. If any metricNames are provided, only metrics with those +// names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. +func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { + return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...) +} + +// TransactionalGatherAndCompare gathers all metrics from the provided Gatherer and compares +// it to an expected output read from the provided Reader in the Prometheus text +// exposition format. If any metricNames are provided, only metrics with those +// names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. +func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error { + got, done, err := g.Gather() + defer done() + if err != nil { + return fmt.Errorf("gathering metrics failed: %w", err) + } + + wanted, err := convertReaderToMetricFamily(expected) + if err != nil { + return err + } + + return compareMetricFamilies(got, wanted, metricNames...) +} + +// CollectAndFormat collects the metrics identified by `metricNames` and returns them in the given format. +func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNames ...string) ([]byte, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %w", err) + } + + gotFiltered, err := reg.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %w", err) + } + + gotFiltered = filterMetrics(gotFiltered, metricNames) + + var gotFormatted bytes.Buffer + enc := expfmt.NewEncoder(&gotFormatted, expfmt.NewFormat(format)) + for _, mf := range gotFiltered { + if err := enc.Encode(mf); err != nil { + return nil, fmt.Errorf("encoding gathered metrics failed: %w", err) + } + } + + return gotFormatted.Bytes(), nil +} + +// convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of +// dto.MetricFamily. +func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { + var tp expfmt.TextParser + notNormalized, err := tp.TextToMetricFamilies(reader) + if err != nil { + return nil, fmt.Errorf("converting reader to metric families failed: %w", err) + } + + // The text protocol handles empty help fields inconsistently. When + // encoding, any non-nil value, include the empty string, produces a + // "# HELP" line. 
But when decoding, the help field is only set to a + // non-nil value if the "# HELP" line contains a non-empty value. + // + // Because metrics in a registry always have non-nil help fields, populate + // any nil help fields in the parsed metrics with the empty string so that + // when we compare text encodings, the results are consistent. + for _, metric := range notNormalized { + if metric.Help == nil { + metric.Help = proto.String("") + } + } + + return internal.NormalizeMetricFamilies(notNormalized), nil +} + +// compareMetricFamilies would compare 2 slices of metric families, and optionally filters both of +// them to the `metricNames` provided. +func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...string) error { + if metricNames != nil { + got = filterMetrics(got, metricNames) + expected = filterMetrics(expected, metricNames) + } + + return compare(got, expected) +} + +// compare encodes both provided slices of metric families into the text format, +// compares their string message, and returns an error if they do not match. +// The error contains the encoded text of both the desired and the actual +// result. +func compare(got, want []*dto.MetricFamily) error { + var gotBuf, wantBuf bytes.Buffer + enc := expfmt.NewEncoder(&gotBuf, expfmt.NewFormat(expfmt.TypeTextPlain)) + for _, mf := range got { + if err := enc.Encode(mf); err != nil { + return fmt.Errorf("encoding gathered metrics failed: %w", err) + } + } + enc = expfmt.NewEncoder(&wantBuf, expfmt.NewFormat(expfmt.TypeTextPlain)) + for _, mf := range want { + if err := enc.Encode(mf); err != nil { + return fmt.Errorf("encoding expected metrics failed: %w", err) + } + } + if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { + return fmt.Errorf(diffErr) + } + return nil +} + +func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { + var filtered []*dto.MetricFamily + for _, m := range metrics { + for _, name := range names { + if m.GetName() == name { + filtered = append(filtered, m) + break + } + } + } + return filtered +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 831666c1a..ab5c420f7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -128,9 +128,6 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version github.com/AzureAD/microsoft-authentication-library-for-go/apps/public -# github.com/DataDog/datadog-go/v5 v5.6.0 -## explicit; go 1.13 -github.com/DataDog/datadog-go/v5/statsd # github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 ## explicit; go 1.21 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp @@ -140,13 +137,6 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric # github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 ## explicit; go 1.22 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping -# github.com/Microsoft/go-winio v0.6.2 -## explicit; go 1.21 -github.com/Microsoft/go-winio -github.com/Microsoft/go-winio/internal/fs -github.com/Microsoft/go-winio/internal/socket -github.com/Microsoft/go-winio/internal/stringbuffer -github.com/Microsoft/go-winio/pkg/guid # github.com/ProtonMail/go-crypto v1.1.3 ## explicit; go 1.17 github.com/ProtonMail/go-crypto/bitcurves @@ -616,6 +606,9 @@ 
github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promauto github.com/prometheus/client_golang/prometheus/promhttp +github.com/prometheus/client_golang/prometheus/testutil +github.com/prometheus/client_golang/prometheus/testutil/promlint +github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go From 580c6bc7fc45f3433ba8cca3b85db19b781199f5 Mon Sep 17 00:00:00 2001 From: Alex Cottner Date: Thu, 23 Jan 2025 14:36:55 -0700 Subject: [PATCH 2/4] Removed unused function --- handlers.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/handlers.go b/handlers.go index c995f58d0..8296100f1 100644 --- a/handlers.go +++ b/handlers.go @@ -520,14 +520,3 @@ func (a *autographer) handleGetAuthKeyIDs(w http.ResponseWriter, r *http.Request w.WriteHeader(http.StatusOK) w.Write(signerIDsJSON) } - -// usedDefaultSignerTag returns a statds tag indicating whether the default -// signer for an authorization was used. -func usedDefaultSignerTag(sigreq formats.SignatureRequest) string { - // TODO(AUT-206): remove this when we've migrate everyone off of the default - // keyid - if sigreq.KeyID == "" { - return "used_default_signer:true" - } - return "used_default_signer:false" -} From 520f0cb019b0e6671e7b907c3d528e46b000ec6d Mon Sep 17 00:00:00 2001 From: Alex Cottner <148472676+alexcottner@users.noreply.github.com> Date: Fri, 24 Jan 2025 15:05:43 -0700 Subject: [PATCH 3/4] remove commented out code Co-authored-by: Se Yeon Kim <25109943+say-yawn@users.noreply.github.com> --- main.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/main.go b/main.go index f94eb926d..39c6300ae 100644 --- a/main.go +++ b/main.go @@ -475,10 +475,6 @@ func (a *autographer) addSigners(signerConfs []signer.Configuration) error { // statsClient *signer.StatsClient err error ) - // statsClient, err = signer.NewStatsClient(signerConf) - // if statsClient == nil || err != nil { - // return fmt.Errorf("failed to add signer stats client %q or got back nil statsClient: %w", signerConf.ID, err) - // } // give the database handler to the signer configuration if a.db != nil { signerConf.DB = a.db From 1d123f954a0ae6bd82836a0e77da6febaeaefce7 Mon Sep 17 00:00:00 2001 From: Alex Cottner Date: Tue, 28 Jan 2025 09:53:41 -0700 Subject: [PATCH 4/4] Adjusting based on PR feedback. 
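This splits the old combined 2xx/4xx bucket in stats.go: 2xx responses keep the
"success" status label, 4xx responses are now counted under a separate
"client_failure" label, and everything else still falls through to the existing
fallback branch. main.go also drops the leftover commented-out statsClient
declaration, and stats_test.go now asserts the new label.

For reference, a minimal, self-contained sketch of the assertion pattern the
updated test relies on. This is not part of the patch: the CounterVec below is
a stand-in for responseSuccessCounter, and its metric name and the "myhandler"
value are invented for the example; only the ("handler", "status") label keys
and the testutil.ToFloat64 call mirror the real code in stats.go and
stats_test.go.

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/testutil"
    )

    func main() {
    	// Stand-in for responseSuccessCounter: a CounterVec keyed by the same
    	// ("handler", "status") labels used in stats.go.
    	c := prometheus.NewCounterVec(
    		prometheus.CounterOpts{
    			Name: "response_success_example", // hypothetical name for this sketch
    			Help: "responses by handler and status class",
    		},
    		[]string{"handler", "status"},
    	)

    	// Roughly what statsWriter.WriteHeader now records for a 4xx response.
    	c.With(prometheus.Labels{"handler": "myhandler", "status": "client_failure"}).Inc()

    	// testutil.ToFloat64 reads back a single child series, which is how
    	// stats_test.go checks the counters after WriteHeader runs.
    	fmt.Println(testutil.ToFloat64(c.WithLabelValues("myhandler", "client_failure"))) // prints 1
    }
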
--- main.go | 3 +-- stats.go | 7 ++++++- stats_test.go | 4 ++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/main.go b/main.go index 39c6300ae..2ce28978c 100644 --- a/main.go +++ b/main.go @@ -471,8 +471,7 @@ func (a *autographer) addSigners(signerConfs []signer.Configuration) error { } sids[signerConf.ID] = true var ( - s signer.Signer - // statsClient *signer.StatsClient + s signer.Signer err error ) // give the database handler to the signer configuration diff --git a/stats.go b/stats.go index 533cf2e92..1566466b8 100644 --- a/stats.go +++ b/stats.go @@ -73,11 +73,16 @@ func (w *statsWriter) WriteHeader(statusCode int) { "statusCode": fmt.Sprintf("%d", statusCode), }).Inc() - if statusCode >= 200 && statusCode < 300 || statusCode >= 400 && statusCode < 500 { + if statusCode >= 200 && statusCode < 300 { responseSuccessCounter.With(prometheus.Labels{ "handler": w.handlerName, "status": "success", }).Inc() + } else if statusCode >= 400 && statusCode < 500 { + responseSuccessCounter.With(prometheus.Labels{ + "handler": w.handlerName, + "status": "client_failure", + }).Inc() } else { responseSuccessCounter.With(prometheus.Labels{ "handler": w.handlerName, diff --git a/stats_test.go b/stats_test.go index 90fd997f3..d89224e79 100644 --- a/stats_test.go +++ b/stats_test.go @@ -27,8 +27,8 @@ func TestStatsResponseWriterWritesResponseMetricOnce(t *testing.T) { t.Fatalf("tried to write to the headers again: Expected status code %d, got %d", http.StatusBadRequest, recorder.Code) } - if testutil.ToFloat64(responseSuccessCounter.WithLabelValues("myhandler", "success")) != float64(1) { - t.Fatalf("Expected responseSuccessCounter to be 1, got %f", testutil.ToFloat64(responseSuccessCounter.WithLabelValues("myhandler", "success"))) + if testutil.ToFloat64(responseSuccessCounter.WithLabelValues("myhandler", "client_failure")) != float64(1) { + t.Fatalf("Expected responseSuccessCounter to be 1, got %f", testutil.ToFloat64(responseSuccessCounter.WithLabelValues("myhandler", "client_failure"))) } if testutil.ToFloat64(responseStatusCounter.WithLabelValues("myhandler", "400")) != float64(1) {