From af2aafe809a69fd633e2b9a15f889bcaab60bfe0 Mon Sep 17 00:00:00 2001 From: Sascha Schwarze Date: Thu, 22 Aug 2024 10:41:17 +0200 Subject: [PATCH] Make Kubernetes dependencies and Go version consistent --- .github/workflows/ci.yml | 6 +- .github/workflows/mirror-images.yaml | 1 + .github/workflows/nightly.yaml | 2 +- .github/workflows/release.yaml | 2 +- .github/workflows/update-tekton-version.yaml | 2 +- .github/workflows/verify.yaml | 2 +- HACK.md | 2 +- cmd/shipwright-build-controller/main.go | 8 +- docs/buildstrategies.md | 2 +- go.mod | 16 +- go.sum | 41 +- pkg/controller/fakes/manager.go | 78 ++-- samples/v1alpha1/build/build_ko_cr.yaml | 2 +- .../buildstrategy/ko/buildstrategy_ko_cr.yaml | 2 +- samples/v1beta1/build/build_ko_cr.yaml | 2 +- .../buildstrategy/ko/buildstrategy_ko_cr.yaml | 2 +- test/utils/v1alpha1/controllers.go | 4 +- test/utils/v1beta1/controllers.go | 4 +- vendor/github.com/go-logr/logr/slogr/slogr.go | 61 +++ vendor/github.com/go-logr/zapr/.golangci.yaml | 20 + vendor/github.com/go-logr/zapr/README.md | 39 +- vendor/github.com/go-logr/zapr/slogzapr.go | 183 ++++++++ vendor/github.com/go-logr/zapr/zapr.go | 15 +- vendor/github.com/go-logr/zapr/zapr_noslog.go | 34 ++ vendor/github.com/go-logr/zapr/zapr_slog.go | 48 +++ vendor/modules.txt | 23 +- .../controller-runtime/pkg/cache/cache.go | 404 ++++++++++++++---- .../pkg/cache/delegating_by_gvk_cache.go | 135 ++++++ .../pkg/cache/informer_cache.go | 85 +++- .../pkg/cache/internal/cache_reader.go | 73 +++- .../pkg/cache/internal/informers.go | 177 ++++---- .../pkg/cache/internal/transformers.go | 55 --- .../pkg/cache/multi_namespace_cache.go | 193 +++++---- .../pkg/certwatcher/certwatcher.go | 10 +- .../pkg/client/apiutil/apimachinery.go | 21 - .../pkg/client/apiutil/errors.go | 54 +++ .../pkg/client/apiutil/restmapper.go | 90 ++-- .../controller-runtime/pkg/client/client.go | 26 +- .../pkg/client/fieldowner.go | 106 +++++ .../pkg/client/interfaces.go | 1 + .../controller-runtime/pkg/client/options.go | 5 +- .../controller-runtime/pkg/cluster/cluster.go | 42 +- .../config/v1alpha1/zz_generated.deepcopy.go | 1 - .../pkg/controller/controller.go | 4 +- .../controllerutil/controllerutil.go | 116 ++++- .../pkg/handler/eventhandler.go | 2 +- .../pkg/internal/controller/controller.go | 8 + .../pkg/internal/field/selector/utils.go | 16 +- .../pkg/internal/syncs/syncs.go | 38 ++ .../controller-runtime/pkg/log/deleg.go | 3 + .../controller-runtime/pkg/log/log.go | 11 +- .../controller-runtime/pkg/log/zap/zap.go | 10 +- .../pkg/manager/internal.go | 124 ++---- .../controller-runtime/pkg/manager/manager.go | 193 +++------ .../pkg/manager/runnable_group.go | 19 +- .../pkg/metrics/listener.go | 52 --- .../pkg/metrics/server/doc.go | 26 ++ .../pkg/metrics/server/server.go | 332 ++++++++++++++ .../pkg/metrics/workqueue.go | 4 +- .../pkg/reconcile/reconcile.go | 44 +- .../pkg/webhook/admission/defaulter.go | 2 + .../pkg/webhook/admission/http.go | 58 ++- .../pkg/webhook/admission/validator.go | 2 + .../pkg/webhook/admission/validator_custom.go | 1 - .../controller-runtime/pkg/webhook/alias.go | 2 + .../controller-runtime/pkg/webhook/server.go | 47 +- 66 files changed, 2285 insertions(+), 908 deletions(-) create mode 100644 vendor/github.com/go-logr/logr/slogr/slogr.go create mode 100644 vendor/github.com/go-logr/zapr/.golangci.yaml create mode 100644 vendor/github.com/go-logr/zapr/slogzapr.go create mode 100644 vendor/github.com/go-logr/zapr/zapr_noslog.go create mode 100644 vendor/github.com/go-logr/zapr/zapr_slog.go create mode 
100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/delegating_by_gvk_cache.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/errors.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/syncs/syncs.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/server.go diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0d38518d2f..f76e50013d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,7 +21,7 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: '1.21.x' + go-version: '1.22.x' cache: true check-latest: true - name: Install Trivy @@ -76,7 +76,7 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: '1.21.x' + go-version: '1.22.x' cache: true check-latest: true - name: Install Ko @@ -162,7 +162,7 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: '1.21.x' + go-version: '1.22.x' cache: true check-latest: true - name: Install kubectl diff --git a/.github/workflows/mirror-images.yaml b/.github/workflows/mirror-images.yaml index adb32410f0..c2b743263d 100644 --- a/.github/workflows/mirror-images.yaml +++ b/.github/workflows/mirror-images.yaml @@ -24,6 +24,7 @@ jobs: library/golang:1.19 \ library/golang:1.20 \ library/golang:1.21 \ + library/golang:1.22 \ library/maven:3-jdk-8-openj9 \ library/node:12 \ library/node:14 \ diff --git a/.github/workflows/nightly.yaml b/.github/workflows/nightly.yaml index 1c868e9fc9..5fd3ca54e3 100644 --- a/.github/workflows/nightly.yaml +++ b/.github/workflows/nightly.yaml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: - go-version: '1.21.x' + go-version: '1.22.x' cache: true check-latest: true diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 93f1a9a5d4..3404caa8f6 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -37,7 +37,7 @@ jobs: fetch-depth: 0 # Fetch all history, needed for release note generation. - uses: actions/setup-go@v5 with: - go-version: '1.21.x' + go-version: '1.22.x' cache: true check-latest: true diff --git a/.github/workflows/update-tekton-version.yaml b/.github/workflows/update-tekton-version.yaml index a35b20b222..7dfae21e97 100644 --- a/.github/workflows/update-tekton-version.yaml +++ b/.github/workflows/update-tekton-version.yaml @@ -22,7 +22,7 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: '1.21.x' + go-version: '1.22.x' cache: true check-latest: true diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index e44f6b177f..93fdc23cf7 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -27,7 +27,7 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: '1.21.x' + go-version: '1.22.x' cache: true check-latest: true cache-dependency-path: go/src/github.com/shipwright-io/build diff --git a/HACK.md b/HACK.md index ca7e76d259..c99f27bdb1 100644 --- a/HACK.md +++ b/HACK.md @@ -41,7 +41,7 @@ In the near future, the above would be setup by the controller. 
make clean && make build ``` -* This project uses Golang 1.21 and controller-gen v0.12.1. +* This project uses Golang 1.22 and controller-gen v0.12.1. * The controllers create/watch Tekton objects. # Testing diff --git a/cmd/shipwright-build-controller/main.go b/cmd/shipwright-build-controller/main.go index c838de5866..7ee26bdaf4 100644 --- a/cmd/shipwright-build-controller/main.go +++ b/cmd/shipwright-build-controller/main.go @@ -22,6 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" buildconfig "github.com/shipwright-io/build/pkg/config" "github.com/shipwright-io/build/pkg/controller" @@ -90,8 +91,9 @@ func main() { LeaseDuration: buildCfg.ManagerOptions.LeaseDuration, RenewDeadline: buildCfg.ManagerOptions.RenewDeadline, RetryPeriod: buildCfg.ManagerOptions.RetryPeriod, - Namespace: "", - MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), + Metrics: server.Options{ + BindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), + }, }) if err != nil { ctxlog.Error(ctx, err, "") @@ -122,7 +124,7 @@ func main() { // Add optionally configured extra handlers to metrics endpoint for path, handler := range buildMetrics.ExtraHandlers() { ctxlog.Info(ctx, "Adding metrics extra handler path", "path", path) - if err := mgr.AddMetricsExtraHandler(path, handler); err != nil { + if err := mgr.AddMetricsServerExtraHandler(path, handler); err != nil { ctxlog.Error(ctx, err, "") os.Exit(2) } diff --git a/docs/buildstrategies.md b/docs/buildstrategies.md index 5c47e29629..698e3eda85 100644 --- a/docs/buildstrategies.md +++ b/docs/buildstrategies.md @@ -311,7 +311,7 @@ The build strategy provides the following parameters that you can set in a Build | Parameter | Description | Default | |---------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------| | `go-flags` | Value for the GOFLAGS environment variable. | Empty | -| `go-version` | Version of Go, must match a tag from [the golang image](https://hub.docker.com/_/golang?tab=tags) | `1.21` | +| `go-version` | Version of Go, must match a tag from [the golang image](https://hub.docker.com/_/golang?tab=tags) | `1.22` | | `ko-version` | Version of ko, must be either `latest` for the newest release, or a [ko release name](https://github.com/ko-build/ko/releases) | `latest` | | `package-directory` | The directory inside the context directory containing the main package. | `.` | | `target-platform` | Target platform to be built. For example: `linux/arm64`. Multiple platforms can be provided separated by comma, for example: `linux/arm64,linux/amd64`. The value `all` will build all platforms supported by the base image. The value `current` will build the platform on which the build runs. 
| `current` | diff --git a/go.mod b/go.mod index add523a9af..22669a028e 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,6 @@ module github.com/shipwright-io/build go 1.22 -toolchain go1.22.6 - require ( github.com/docker/cli v27.1.2+incompatible github.com/go-git/go-git/v5 v5.12.0 @@ -19,15 +17,15 @@ require ( github.com/spf13/pflag v1.0.5 github.com/tektoncd/pipeline v0.62.1 go.uber.org/zap v1.27.0 - k8s.io/api v0.29.6 - k8s.io/apiextensions-apiserver v0.29.2 + k8s.io/api v0.29.7 + k8s.io/apiextensions-apiserver v0.29.7 k8s.io/apimachinery v0.29.7 - k8s.io/client-go v0.29.6 + k8s.io/client-go v0.29.7 k8s.io/code-generator v0.29.7 - k8s.io/kubectl v0.27.11 + k8s.io/kubectl v0.29.7 k8s.io/utils v0.0.0-20240102154912-e7106e64919e knative.dev/pkg v0.0.0-20240416145024-0f34a8815650 - sigs.k8s.io/controller-runtime v0.15.3 + sigs.k8s.io/controller-runtime v0.17.6 sigs.k8s.io/yaml v1.4.0 ) @@ -57,7 +55,7 @@ require ( github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/zapr v1.2.4 // indirect + github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect @@ -121,7 +119,7 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.29.2 // indirect + k8s.io/component-base v0.29.7 // indirect k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect diff --git a/go.sum b/go.sum index e62d59ae0f..d1dd523d73 100644 --- a/go.sum +++ b/go.sum @@ -58,7 +58,6 @@ github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8 github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -141,11 +140,10 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= 
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -401,7 +399,6 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -412,16 +409,12 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -468,7 +461,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= @@ -505,7 +497,6 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -535,7 +526,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -576,9 +566,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -661,7 +649,6 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= @@ -795,18 +782,18 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.29.6 h1:eDxIl8+PeEpwbe2YyS5RXJ9vdn4hnKWMBf4WUJP9DQM= -k8s.io/api v0.29.6/go.mod 
h1:ZuUPMhJV74DJXapldbg6upaHfiOjrBb+0ffUbBi1jaw= -k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= -k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= +k8s.io/api v0.29.7 h1:Q2/thp7YYESgy0MGzxT9RvA/6doLJHBXSFH8GGLxSbc= +k8s.io/api v0.29.7/go.mod h1:mPimdbyuIjwoLtBEVIGVUYb4BKOE+44XHt/n4IqKsLA= +k8s.io/apiextensions-apiserver v0.29.7 h1:X62u7vUGfwW5rYJB5jkZDr0uV2XSyEHJRdxnfD5PaLs= +k8s.io/apiextensions-apiserver v0.29.7/go.mod h1:JzBXxlZKKdtEYGr4yiN+s0eXheCTYgKDay8JXPfSGoQ= k8s.io/apimachinery v0.29.7 h1:ICXzya58Q7hyEEfnTrbmdfX1n1schSepX2KUfC2/ykc= k8s.io/apimachinery v0.29.7/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= -k8s.io/client-go v0.29.6 h1:5E2ebuB/p0F0THuQatyvhDvPL2SIeqwTPrtnrwKob/8= -k8s.io/client-go v0.29.6/go.mod h1:jHZcrQqDplyv20v7eu+iFM4gTpglZSZoMVcKrh8sRGg= +k8s.io/client-go v0.29.7 h1:vTtiFrGBKlcBhxaeZC4eDrqui1e108nsTyue/KU63IY= +k8s.io/client-go v0.29.7/go.mod h1:69BvVqdRozgR/9TP45u/oO0tfrdbP+I8RqrcCJQshzg= k8s.io/code-generator v0.29.7 h1:NEwmKOJVNObCh3upBLEojL1QuJMzGplOTYZnee4h0TY= k8s.io/code-generator v0.29.7/go.mod h1:7TYnI0dYItL2cKuhhgPSuF3WED9uMdELgbVXFfn/joE= -k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= -k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= +k8s.io/component-base v0.29.7 h1:zXLJvZjvvDWdYmZCwZYk95E1Fd2oRXUz71mQukkRk5I= +k8s.io/component-base v0.29.7/go.mod h1:ddLTpIrjazaRI1EG83M41GNcYEAdskuQmx4JOOSXCOg= k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4 h1:izq7u3SJBdOAuA5YYe1/PIp9jczrih/jGlKRRt0G7bQ= k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= @@ -814,8 +801,8 @@ k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/kubectl v0.27.11 h1:rndS6LxY+qjTsEE/frCRlCL/TWeokWOgynSa3v5lbPM= -k8s.io/kubectl v0.27.11/go.mod h1:1eq/sCLAWOsPaHt/zojqEvVJECOmC68ol6eO4RyC4oc= +k8s.io/kubectl v0.29.7 h1:D+Jheug9M++zlt67cROZgxaIjrDdLqp9jkW/EYrXAoM= +k8s.io/kubectl v0.29.7/go.mod h1:VOEJkcfKTO/X8xSSB6d2JXP/Qni6xtjuI3CUP52T9bM= k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= knative.dev/pkg v0.0.0-20240416145024-0f34a8815650 h1:m2ahFUO0L2VrgGDYdyOUFdE6xBd3pLXAJozLJwqLRQM= @@ -823,8 +810,8 @@ knative.dev/pkg v0.0.0-20240416145024-0f34a8815650/go.mod h1:soFw5ss08G4PU3JiFDK rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.15.3 h1:L+t5heIaI3zeejoIyyvLQs5vTVu/67IU2FfisVzFlBc= -sigs.k8s.io/controller-runtime v0.15.3/go.mod h1:kp4jckA4vTx281S/0Yk2LFEEQe67mjg+ev/yknv47Ds= +sigs.k8s.io/controller-runtime v0.17.6 h1:12IXsozEsIXWAMRpgRlYS1jjAHQXHtWEOMdULh3DbEw= +sigs.k8s.io/controller-runtime v0.17.6/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd 
h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/pkg/controller/fakes/manager.go b/pkg/controller/fakes/manager.go index ad7a85328b..b64db10c69 100644 --- a/pkg/controller/fakes/manager.go +++ b/pkg/controller/fakes/manager.go @@ -46,16 +46,16 @@ type FakeManager struct { addHealthzCheckReturnsOnCall map[int]struct { result1 error } - AddMetricsExtraHandlerStub func(string, http.Handler) error - addMetricsExtraHandlerMutex sync.RWMutex - addMetricsExtraHandlerArgsForCall []struct { + AddMetricsServerExtraHandlerStub func(string, http.Handler) error + addMetricsServerExtraHandlerMutex sync.RWMutex + addMetricsServerExtraHandlerArgsForCall []struct { arg1 string arg2 http.Handler } - addMetricsExtraHandlerReturns struct { + addMetricsServerExtraHandlerReturns struct { result1 error } - addMetricsExtraHandlerReturnsOnCall map[int]struct { + addMetricsServerExtraHandlerReturnsOnCall map[int]struct { result1 error } AddReadyzCheckStub func(string, healthz.Checker) error @@ -339,17 +339,17 @@ func (fake *FakeManager) AddHealthzCheckReturnsOnCall(i int, result1 error) { }{result1} } -func (fake *FakeManager) AddMetricsExtraHandler(arg1 string, arg2 http.Handler) error { - fake.addMetricsExtraHandlerMutex.Lock() - ret, specificReturn := fake.addMetricsExtraHandlerReturnsOnCall[len(fake.addMetricsExtraHandlerArgsForCall)] - fake.addMetricsExtraHandlerArgsForCall = append(fake.addMetricsExtraHandlerArgsForCall, struct { +func (fake *FakeManager) AddMetricsServerExtraHandler(arg1 string, arg2 http.Handler) error { + fake.addMetricsServerExtraHandlerMutex.Lock() + ret, specificReturn := fake.addMetricsServerExtraHandlerReturnsOnCall[len(fake.addMetricsServerExtraHandlerArgsForCall)] + fake.addMetricsServerExtraHandlerArgsForCall = append(fake.addMetricsServerExtraHandlerArgsForCall, struct { arg1 string arg2 http.Handler }{arg1, arg2}) - stub := fake.AddMetricsExtraHandlerStub - fakeReturns := fake.addMetricsExtraHandlerReturns - fake.recordInvocation("AddMetricsExtraHandler", []interface{}{arg1, arg2}) - fake.addMetricsExtraHandlerMutex.Unlock() + stub := fake.AddMetricsServerExtraHandlerStub + fakeReturns := fake.addMetricsServerExtraHandlerReturns + fake.recordInvocation("AddMetricsServerExtraHandler", []interface{}{arg1, arg2}) + fake.addMetricsServerExtraHandlerMutex.Unlock() if stub != nil { return stub(arg1, arg2) } @@ -359,44 +359,44 @@ func (fake *FakeManager) AddMetricsExtraHandler(arg1 string, arg2 http.Handler) return fakeReturns.result1 } -func (fake *FakeManager) AddMetricsExtraHandlerCallCount() int { - fake.addMetricsExtraHandlerMutex.RLock() - defer fake.addMetricsExtraHandlerMutex.RUnlock() - return len(fake.addMetricsExtraHandlerArgsForCall) +func (fake *FakeManager) AddMetricsServerExtraHandlerCallCount() int { + fake.addMetricsServerExtraHandlerMutex.RLock() + defer fake.addMetricsServerExtraHandlerMutex.RUnlock() + return len(fake.addMetricsServerExtraHandlerArgsForCall) } -func (fake *FakeManager) AddMetricsExtraHandlerCalls(stub func(string, http.Handler) error) { - fake.addMetricsExtraHandlerMutex.Lock() - defer fake.addMetricsExtraHandlerMutex.Unlock() - fake.AddMetricsExtraHandlerStub = stub +func (fake *FakeManager) AddMetricsServerExtraHandlerCalls(stub func(string, http.Handler) error) { + fake.addMetricsServerExtraHandlerMutex.Lock() + defer 
fake.addMetricsServerExtraHandlerMutex.Unlock() + fake.AddMetricsServerExtraHandlerStub = stub } -func (fake *FakeManager) AddMetricsExtraHandlerArgsForCall(i int) (string, http.Handler) { - fake.addMetricsExtraHandlerMutex.RLock() - defer fake.addMetricsExtraHandlerMutex.RUnlock() - argsForCall := fake.addMetricsExtraHandlerArgsForCall[i] +func (fake *FakeManager) AddMetricsServerExtraHandlerArgsForCall(i int) (string, http.Handler) { + fake.addMetricsServerExtraHandlerMutex.RLock() + defer fake.addMetricsServerExtraHandlerMutex.RUnlock() + argsForCall := fake.addMetricsServerExtraHandlerArgsForCall[i] return argsForCall.arg1, argsForCall.arg2 } -func (fake *FakeManager) AddMetricsExtraHandlerReturns(result1 error) { - fake.addMetricsExtraHandlerMutex.Lock() - defer fake.addMetricsExtraHandlerMutex.Unlock() - fake.AddMetricsExtraHandlerStub = nil - fake.addMetricsExtraHandlerReturns = struct { +func (fake *FakeManager) AddMetricsServerExtraHandlerReturns(result1 error) { + fake.addMetricsServerExtraHandlerMutex.Lock() + defer fake.addMetricsServerExtraHandlerMutex.Unlock() + fake.AddMetricsServerExtraHandlerStub = nil + fake.addMetricsServerExtraHandlerReturns = struct { result1 error }{result1} } -func (fake *FakeManager) AddMetricsExtraHandlerReturnsOnCall(i int, result1 error) { - fake.addMetricsExtraHandlerMutex.Lock() - defer fake.addMetricsExtraHandlerMutex.Unlock() - fake.AddMetricsExtraHandlerStub = nil - if fake.addMetricsExtraHandlerReturnsOnCall == nil { - fake.addMetricsExtraHandlerReturnsOnCall = make(map[int]struct { +func (fake *FakeManager) AddMetricsServerExtraHandlerReturnsOnCall(i int, result1 error) { + fake.addMetricsServerExtraHandlerMutex.Lock() + defer fake.addMetricsServerExtraHandlerMutex.Unlock() + fake.AddMetricsServerExtraHandlerStub = nil + if fake.addMetricsServerExtraHandlerReturnsOnCall == nil { + fake.addMetricsServerExtraHandlerReturnsOnCall = make(map[int]struct { result1 error }) } - fake.addMetricsExtraHandlerReturnsOnCall[i] = struct { + fake.addMetricsServerExtraHandlerReturnsOnCall[i] = struct { result1 error }{result1} } @@ -1228,8 +1228,8 @@ func (fake *FakeManager) Invocations() map[string][][]interface{} { defer fake.addMutex.RUnlock() fake.addHealthzCheckMutex.RLock() defer fake.addHealthzCheckMutex.RUnlock() - fake.addMetricsExtraHandlerMutex.RLock() - defer fake.addMetricsExtraHandlerMutex.RUnlock() + fake.addMetricsServerExtraHandlerMutex.RLock() + defer fake.addMetricsServerExtraHandlerMutex.RUnlock() fake.addReadyzCheckMutex.RLock() defer fake.addReadyzCheckMutex.RUnlock() fake.electedMutex.RLock() diff --git a/samples/v1alpha1/build/build_ko_cr.yaml b/samples/v1alpha1/build/build_ko_cr.yaml index 5bcdb1ec68..e89883ca31 100644 --- a/samples/v1alpha1/build/build_ko_cr.yaml +++ b/samples/v1alpha1/build/build_ko_cr.yaml @@ -10,7 +10,7 @@ spec: - name: go-flags value: "-v -mod=vendor -ldflags=-w" - name: go-version - value: "1.21" + value: "1.22" - name: package-directory value: ./cmd/shipwright-build-controller source: diff --git a/samples/v1alpha1/buildstrategy/ko/buildstrategy_ko_cr.yaml b/samples/v1alpha1/buildstrategy/ko/buildstrategy_ko_cr.yaml index 0ef3620bc4..15012dc0fc 100644 --- a/samples/v1alpha1/buildstrategy/ko/buildstrategy_ko_cr.yaml +++ b/samples/v1alpha1/buildstrategy/ko/buildstrategy_ko_cr.yaml @@ -10,7 +10,7 @@ spec: default: "" - name: go-version description: "Version of Go, must match a tag from https://hub.docker.com/_/golang?tab=tags" - default: "1.21" + default: "1.22" - name: ko-version description: "Version of ko, 
must be either 'latest', or a release name from https://github.com/ko-build/ko/releases" default: latest diff --git a/samples/v1beta1/build/build_ko_cr.yaml b/samples/v1beta1/build/build_ko_cr.yaml index 037c84c8a1..2ca3d3c2de 100644 --- a/samples/v1beta1/build/build_ko_cr.yaml +++ b/samples/v1beta1/build/build_ko_cr.yaml @@ -8,7 +8,7 @@ spec: - name: go-flags value: "-v -mod=vendor -ldflags=-w" - name: go-version - value: "1.21" + value: "1.22" - name: package-directory value: ./cmd/shipwright-build-controller source: diff --git a/samples/v1beta1/buildstrategy/ko/buildstrategy_ko_cr.yaml b/samples/v1beta1/buildstrategy/ko/buildstrategy_ko_cr.yaml index 922ac28dea..0a2498ab90 100644 --- a/samples/v1beta1/buildstrategy/ko/buildstrategy_ko_cr.yaml +++ b/samples/v1beta1/buildstrategy/ko/buildstrategy_ko_cr.yaml @@ -10,7 +10,7 @@ spec: default: "" - name: go-version description: "Version of Go, must match a tag from https://hub.docker.com/_/golang?tab=tags" - default: "1.21" + default: "1.22" - name: ko-version description: "Version of ko, must be either 'latest', or a release name from https://github.com/ko-build/ko/releases" default: latest diff --git a/test/utils/v1alpha1/controllers.go b/test/utils/v1alpha1/controllers.go index 539dd58f06..26f4317339 100644 --- a/test/utils/v1alpha1/controllers.go +++ b/test/utils/v1alpha1/controllers.go @@ -21,9 +21,7 @@ func (t *TestBuild) StartBuildControllers() error { c.SetConfigFromEnv() mgr, err := controller.NewManager(t.Context, c, t.KubeConfig, manager.Options{ - Namespace: t.Namespace, - LeaderElection: false, - MetricsBindAddress: "0", + LeaderElection: false, }) if err != nil { return err diff --git a/test/utils/v1beta1/controllers.go b/test/utils/v1beta1/controllers.go index 539dd58f06..26f4317339 100644 --- a/test/utils/v1beta1/controllers.go +++ b/test/utils/v1beta1/controllers.go @@ -21,9 +21,7 @@ func (t *TestBuild) StartBuildControllers() error { c.SetConfigFromEnv() mgr, err := controller.NewManager(t.Context, c, t.KubeConfig, manager.Options{ - Namespace: t.Namespace, - LeaderElection: false, - MetricsBindAddress: "0", + LeaderElection: false, }) if err != nil { return err diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go new file mode 100644 index 0000000000..36432c56fd --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/slogr.go @@ -0,0 +1,61 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package slogr enables usage of a slog.Handler with logr.Logger as front-end +// API and of a logr.LogSink through the slog.Handler and thus slog.Logger +// APIs. +// +// See the README in the top-level [./logr] package for a discussion of +// interoperability. +// +// Deprecated: use the main logr package instead. +package slogr + +import ( + "log/slog" + + "github.com/go-logr/logr" +) + +// NewLogr returns a logr.Logger which writes to the slog.Handler. +// +// Deprecated: use [logr.FromSlogHandler] instead. 
+func NewLogr(handler slog.Handler) logr.Logger { + return logr.FromSlogHandler(handler) +} + +// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. +// +// Deprecated: use [logr.ToSlogHandler] instead. +func NewSlogHandler(logger logr.Logger) slog.Handler { + return logr.ToSlogHandler(logger) +} + +// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. +// +// Deprecated: use [logr.ToSlogHandler] instead. +func ToSlogHandler(logger logr.Logger) slog.Handler { + return logr.ToSlogHandler(logger) +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. +// +// Deprecated: use [logr.SlogSink] instead. +type SlogSink = logr.SlogSink diff --git a/vendor/github.com/go-logr/zapr/.golangci.yaml b/vendor/github.com/go-logr/zapr/.golangci.yaml new file mode 100644 index 0000000000..64246c50cc --- /dev/null +++ b/vendor/github.com/go-logr/zapr/.golangci.yaml @@ -0,0 +1,20 @@ +issues: + exclude-use-default: false + +linters: + disable-all: true + enable: + - asciicheck + - errcheck + - forcetypeassert + - gocritic + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - typecheck + - unused diff --git a/vendor/github.com/go-logr/zapr/README.md b/vendor/github.com/go-logr/zapr/README.md index 78f5f7653f..ff332da3a1 100644 --- a/vendor/github.com/go-logr/zapr/README.md +++ b/vendor/github.com/go-logr/zapr/README.md @@ -2,12 +2,17 @@ Zapr :zap: ========== A [logr](https://github.com/go-logr/logr) implementation using -[Zap](https://github.com/uber-go/zap). +[Zap](https://github.com/uber-go/zap). Can also be used as +[slog](https://pkg.go.dev/log/slog) handler. Usage ----- +Via logr: + ```go +package main + import ( "fmt" @@ -29,6 +34,33 @@ func main() { } ``` +Via slog: + +``` +package main + +import ( + "fmt" + "log/slog" + + "github.com/go-logr/logr/slogr" + "github.com/go-logr/zapr" + "go.uber.org/zap" +) + +func main() { + var log *slog.Logger + + zapLog, err := zap.NewDevelopment() + if err != nil { + panic(fmt.Sprintf("who watches the watchmen (%v)?", err)) + } + log = slog.New(slogr.NewSlogHandler(zapr.NewLogger(zapLog))) + + log.Info("Logr in action!", "the answer", 42) +} +``` + Increasing Verbosity -------------------- @@ -68,3 +100,8 @@ For the most part, concepts in Zap correspond directly with those in logr. Unlike Zap, all fields *must* be in the form of sugared fields -- it's illegal to pass a strongly-typed Zap field in a key position to any of the logging methods (`Log`, `Error`). + +The zapr `logr.LogSink` implementation also implements `logr.SlogHandler`. That +enables `slogr.NewSlogHandler` to provide a `slog.Handler` which just passes +parameters through to zapr. zapr handles special slog values (Group, +LogValuer), regardless of which front-end API is used. diff --git a/vendor/github.com/go-logr/zapr/slogzapr.go b/vendor/github.com/go-logr/zapr/slogzapr.go new file mode 100644 index 0000000000..84f56e4351 --- /dev/null +++ b/vendor/github.com/go-logr/zapr/slogzapr.go @@ -0,0 +1,183 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package zapr + +import ( + "context" + "log/slog" + "runtime" + + "github.com/go-logr/logr/slogr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var _ slogr.SlogSink = &zapLogger{} + +func (zl *zapLogger) Handle(_ context.Context, record slog.Record) error { + zapLevel := zap.InfoLevel + intLevel := 0 + isError := false + switch { + case record.Level >= slog.LevelError: + zapLevel = zap.ErrorLevel + isError = true + case record.Level >= slog.LevelWarn: + zapLevel = zap.WarnLevel + case record.Level >= 0: + // Already set above -> info. + default: + zapLevel = zapcore.Level(record.Level) + intLevel = int(-zapLevel) + } + + if checkedEntry := zl.l.Check(zapLevel, record.Message); checkedEntry != nil { + checkedEntry.Time = record.Time + checkedEntry.Caller = pcToCallerEntry(record.PC) + var fieldsBuffer [2]zap.Field + fields := fieldsBuffer[:0] + if !isError && zl.numericLevelKey != "" { + // Record verbosity for info entries. + fields = append(fields, zap.Int(zl.numericLevelKey, intLevel)) + } + // Inline all attributes. + fields = append(fields, zap.Inline(zapcore.ObjectMarshalerFunc(func(enc zapcore.ObjectEncoder) error { + record.Attrs(func(attr slog.Attr) bool { + encodeSlog(enc, attr) + return true + }) + return nil + }))) + checkedEntry.Write(fields...) + } + return nil +} + +func encodeSlog(enc zapcore.ObjectEncoder, attr slog.Attr) { + if attr.Equal(slog.Attr{}) { + // Ignore empty attribute. + return + } + + // Check in order of expected frequency, most common ones first. + // + // Usage statistics for parameters from Kubernetes 152876a3e, + // calculated with k/k/test/integration/logs/benchmark: + // + // kube-controller-manager -v10: + // strings: 10043 (85%) + // with API objects: 2 (0% of all arguments) + // types and their number of usage: NodeStatus:2 + // numbers: 792 (6%) + // ObjectRef: 292 (2%) + // others: 595 (5%) + // + // kube-scheduler -v10: + // strings: 1325 (40%) + // with API objects: 109 (3% of all arguments) + // types and their number of usage: PersistentVolume:50 PersistentVolumeClaim:59 + // numbers: 473 (14%) + // ObjectRef: 1305 (39%) + // others: 176 (5%) + + kind := attr.Value.Kind() + switch kind { + case slog.KindString: + enc.AddString(attr.Key, attr.Value.String()) + case slog.KindLogValuer: + // This includes klog.KObj. + encodeSlog(enc, slog.Attr{ + Key: attr.Key, + Value: attr.Value.Resolve(), + }) + case slog.KindInt64: + enc.AddInt64(attr.Key, attr.Value.Int64()) + case slog.KindUint64: + enc.AddUint64(attr.Key, attr.Value.Uint64()) + case slog.KindFloat64: + enc.AddFloat64(attr.Key, attr.Value.Float64()) + case slog.KindBool: + enc.AddBool(attr.Key, attr.Value.Bool()) + case slog.KindDuration: + enc.AddDuration(attr.Key, attr.Value.Duration()) + case slog.KindTime: + enc.AddTime(attr.Key, attr.Value.Time()) + case slog.KindGroup: + attrs := attr.Value.Group() + if attr.Key == "" { + // Inline group. + for _, attr := range attrs { + encodeSlog(enc, attr) + } + return + } + if len(attrs) == 0 { + // Ignore empty group. 
+ return + } + _ = enc.AddObject(attr.Key, marshalAttrs(attrs)) + default: + // We have to go through reflection in zap.Any to get support + // for e.g. fmt.Stringer. + zap.Any(attr.Key, attr.Value.Any()).AddTo(enc) + } +} + +type marshalAttrs []slog.Attr + +func (attrs marshalAttrs) MarshalLogObject(enc zapcore.ObjectEncoder) error { + for _, attr := range attrs { + encodeSlog(enc, attr) + } + return nil +} + +var _ zapcore.ObjectMarshaler = marshalAttrs(nil) + +func pcToCallerEntry(pc uintptr) zapcore.EntryCaller { + if pc == 0 { + return zapcore.EntryCaller{} + } + // Same as https://cs.opensource.google/go/x/exp/+/642cacee:slog/record.go;drc=642cacee5cc05231f45555a333d07f1005ffc287;l=70 + fs := runtime.CallersFrames([]uintptr{pc}) + f, _ := fs.Next() + if f.File == "" { + return zapcore.EntryCaller{} + } + return zapcore.EntryCaller{ + Defined: true, + PC: pc, + File: f.File, + Line: f.Line, + Function: f.Function, + } +} + +func (zl *zapLogger) WithAttrs(attrs []slog.Attr) slogr.SlogSink { + newLogger := *zl + newLogger.l = newLogger.l.With(zap.Inline(marshalAttrs(attrs))) + return &newLogger +} + +func (zl *zapLogger) WithGroup(name string) slogr.SlogSink { + newLogger := *zl + newLogger.l = newLogger.l.With(zap.Namespace(name)) + return &newLogger +} diff --git a/vendor/github.com/go-logr/zapr/zapr.go b/vendor/github.com/go-logr/zapr/zapr.go index 8bb7fceb3f..c8503ab9ea 100644 --- a/vendor/github.com/go-logr/zapr/zapr.go +++ b/vendor/github.com/go-logr/zapr/zapr.go @@ -31,14 +31,14 @@ limitations under the License. // Package zapr defines an implementation of the github.com/go-logr/logr // interfaces built on top of Zap (go.uber.org/zap). // -// Usage +// # Usage // // A new logr.Logger can be constructed from an existing zap.Logger using // the NewLogger function: // -// log := zapr.NewLogger(someZapLogger) +// log := zapr.NewLogger(someZapLogger) // -// Implementation Details +// # Implementation Details // // For the most part, concepts in Zap correspond directly with those in // logr. @@ -168,15 +168,6 @@ func (zl *zapLogger) handleFields(lvl int, args []interface{}, additional ...zap return append(fields, additional...) } -func zapIt(field string, val interface{}) zap.Field { - // Handle types that implement logr.Marshaler: log the replacement - // object instead of the original one. - if marshaler, ok := val.(logr.Marshaler); ok { - field, val = invokeMarshaler(field, marshaler) - } - return zap.Any(field, val) -} - func invokeMarshaler(field string, m logr.Marshaler) (f string, ret interface{}) { defer func() { if r := recover(); r != nil { diff --git a/vendor/github.com/go-logr/zapr/zapr_noslog.go b/vendor/github.com/go-logr/zapr/zapr_noslog.go new file mode 100644 index 0000000000..ec8517b793 --- /dev/null +++ b/vendor/github.com/go-logr/zapr/zapr_noslog.go @@ -0,0 +1,34 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package zapr + +import ( + "github.com/go-logr/logr" + "go.uber.org/zap" +) + +func zapIt(field string, val interface{}) zap.Field { + // Handle types that implement logr.Marshaler: log the replacement + // object instead of the original one. + if marshaler, ok := val.(logr.Marshaler); ok { + field, val = invokeMarshaler(field, marshaler) + } + return zap.Any(field, val) +} diff --git a/vendor/github.com/go-logr/zapr/zapr_slog.go b/vendor/github.com/go-logr/zapr/zapr_slog.go new file mode 100644 index 0000000000..f07203604d --- /dev/null +++ b/vendor/github.com/go-logr/zapr/zapr_slog.go @@ -0,0 +1,48 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package zapr + +import ( + "log/slog" + + "github.com/go-logr/logr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func zapIt(field string, val interface{}) zap.Field { + switch valTyped := val.(type) { + case logr.Marshaler: + // Handle types that implement logr.Marshaler: log the replacement + // object instead of the original one. + field, val = invokeMarshaler(field, valTyped) + case slog.LogValuer: + // The same for slog.LogValuer. We let slog.Value handle + // potential panics and recursion. + val = slog.AnyValue(val).Resolve() + } + if slogValue, ok := val.(slog.Value); ok { + return zap.Inline(zapcore.ObjectMarshalerFunc(func(enc zapcore.ObjectEncoder) error { + encodeSlog(enc, slog.Attr{Key: field, Value: slogValue}) + return nil + })) + } + return zap.Any(field, val) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 851d0180ba..9bf4fe30d4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -186,8 +186,9 @@ github.com/go-logfmt/logfmt ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr -# github.com/go-logr/zapr v1.2.4 -## explicit; go 1.16 +github.com/go-logr/logr/slogr +# github.com/go-logr/zapr v1.3.0 +## explicit; go 1.18 github.com/go-logr/zapr # github.com/go-openapi/jsonpointer v0.19.6 ## explicit; go 1.13 @@ -733,7 +734,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.29.6 +# k8s.io/api v0.29.7 ## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -790,7 +791,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.29.2 +# k8s.io/apiextensions-apiserver v0.29.7 ## explicit; go 1.21 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -847,7 +848,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.29.6 +# k8s.io/client-go v0.29.7 ## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1029,7 +1030,7 @@ k8s.io/code-generator/cmd/set-gen 
k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.29.2 +# k8s.io/component-base v0.29.7 ## explicit; go 1.21 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 @@ -1069,8 +1070,8 @@ k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/sets k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/kubectl v0.27.11 -## explicit; go 1.20 +# k8s.io/kubectl v0.29.7 +## explicit; go 1.21 k8s.io/kubectl/pkg/scheme # k8s.io/utils v0.0.0-20240102154912-e7106e64919e ## explicit; go 1.18 @@ -1102,8 +1103,8 @@ knative.dev/pkg/metrics/metricskey knative.dev/pkg/signals knative.dev/pkg/tracker knative.dev/pkg/webhook/resourcesemantics -# sigs.k8s.io/controller-runtime v0.15.3 -## explicit; go 1.20 +# sigs.k8s.io/controller-runtime v0.17.6 +## explicit; go 1.21 sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/cache/internal sigs.k8s.io/controller-runtime/pkg/certwatcher @@ -1126,12 +1127,14 @@ sigs.k8s.io/controller-runtime/pkg/internal/httpserver sigs.k8s.io/controller-runtime/pkg/internal/log sigs.k8s.io/controller-runtime/pkg/internal/recorder sigs.k8s.io/controller-runtime/pkg/internal/source +sigs.k8s.io/controller-runtime/pkg/internal/syncs sigs.k8s.io/controller-runtime/pkg/leaderelection sigs.k8s.io/controller-runtime/pkg/log sigs.k8s.io/controller-runtime/pkg/log/zap sigs.k8s.io/controller-runtime/pkg/manager sigs.k8s.io/controller-runtime/pkg/manager/signals sigs.k8s.io/controller-runtime/pkg/metrics +sigs.k8s.io/controller-runtime/pkg/metrics/server sigs.k8s.io/controller-runtime/pkg/predicate sigs.k8s.io/controller-runtime/pkg/ratelimiter sigs.k8s.io/controller-runtime/pkg/reconcile diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index 7600387047..73ad68fe43 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -20,8 +20,11 @@ import ( "context" "fmt" "net/http" + "sort" "time" + "golang.org/x/exp/maps" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -31,6 +34,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/cache/internal" "sigs.k8s.io/controller-runtime/pkg/client" @@ -43,14 +47,28 @@ var ( defaultSyncPeriod = 10 * time.Hour ) +// InformerGetOptions defines the behavior of how informers are retrieved. +type InformerGetOptions internal.GetOptions + +// InformerGetOption defines an option that alters the behavior of how informers are retrieved. +type InformerGetOption func(*InformerGetOptions) + +// BlockUntilSynced determines whether a get request for an informer should block +// until the informer's cache has synced. +func BlockUntilSynced(shouldBlock bool) InformerGetOption { + return func(opts *InformerGetOptions) { + opts.BlockUntilSynced = &shouldBlock + } +} + // Cache knows how to load Kubernetes objects, fetch informers to request // to receive events for Kubernetes objects (at a low-level), // and add indices to fields on the objects stored in the cache. type Cache interface { - // Cache acts as a client to objects stored in the cache. + // Reader acts as a client to objects stored in the cache. 
client.Reader - // Cache loads informers and adds field indices. + // Informers loads informers and adds field indices. Informers } @@ -60,49 +78,62 @@ type Cache interface { type Informers interface { // GetInformer fetches or constructs an informer for the given object that corresponds to a single // API kind and resource. - GetInformer(ctx context.Context, obj client.Object) (Informer, error) + GetInformer(ctx context.Context, obj client.Object, opts ...InformerGetOption) (Informer, error) // GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead // of the underlying object. - GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) + GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error) + + // RemoveInformer removes an informer entry and stops it if it was running. + RemoveInformer(ctx context.Context, obj client.Object) error // Start runs all the informers known to this cache until the context is closed. // It blocks. Start(ctx context.Context) error - // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. + // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. WaitForCacheSync(ctx context.Context) bool - // Informers knows how to add indices to the caches (informers) that it manages. + // FieldIndexer adds indices to the managed informers. client.FieldIndexer } -// Informer - informer allows you interact with the underlying informer. +// Informer allows you to interact with the underlying informer. type Informer interface { // AddEventHandler adds an event handler to the shared informer using the shared informer's resync - // period. Events to a single handler are delivered sequentially, but there is no coordination + // period. Events to a single handler are delivered sequentially, but there is no coordination // between different handlers. // It returns a registration handle for the handler that can be used to remove - // the handler again. + // the handler again and an error if the handler cannot be added. AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) + // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the - // specified resync period. Events to a single handler are delivered sequentially, but there is + // specified resync period. Events to a single handler are delivered sequentially, but there is // no coordination between different handlers. // It returns a registration handle for the handler that can be used to remove // the handler again and an error if the handler cannot be added. AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) - // RemoveEventHandler removes a formerly added event handler given by + + // RemoveEventHandler removes a previously added event handler given by // its registration handle. - // This function is guaranteed to be idempotent, and thread-safe. + // This function is guaranteed to be idempotent and thread-safe. RemoveEventHandler(handle toolscache.ResourceEventHandlerRegistration) error - // AddIndexers adds more indexers to this store. If you call this after you already have data + + // AddIndexers adds indexers to this store. If this is called after there is already data // in the store, the results are undefined. 
AddIndexers(indexers toolscache.Indexers) error + // HasSynced return true if the informers underlying store has synced. HasSynced() bool + // IsStopped returns true if the informer has been stopped. + IsStopped() bool } -// Options are the optional arguments for creating a new InformersMap object. +// AllNamespaces should be used as the map key to deliminate namespace settings +// that apply to all namespaces that themselves do not have explicit settings. +const AllNamespaces = metav1.NamespaceAll + +// Options are the optional arguments for creating a new Cache object. type Options struct { // HTTPClient is the http client to use for the REST client HTTPClient *http.Client @@ -140,45 +171,96 @@ type Options struct { // instead of `reconcile.Result{}`. SyncPeriod *time.Duration - // Namespaces restricts the cache's ListWatch to the desired namespaces - // Default watches all namespaces - Namespaces []string + // ReaderFailOnMissingInformer configures the cache to return a ErrResourceNotCached error when a user + // requests, using Get() and List(), a resource the cache does not already have an informer for. + // + // This error is distinct from an errors.NotFound. + // + // Defaults to false, which means that the cache will start a new informer + // for every new requested resource. + ReaderFailOnMissingInformer bool + + // DefaultNamespaces maps namespace names to cache configs. If set, only + // the namespaces in here will be watched and it will by used to default + // ByObject.Namespaces for all objects if that is nil. + // + // It is possible to have specific Config for just some namespaces + // but cache all namespaces by using the AllNamespaces const as the map key. + // This will then include all namespaces that do not have a more specific + // setting. + // + // The options in the Config that are nil will be defaulted from + // the respective Default* settings. + DefaultNamespaces map[string]Config - // DefaultLabelSelector will be used as a label selectors for all object types - // unless they have a more specific selector set in ByObject. + // DefaultLabelSelector will be used as a label selector for all objects + // unless there is already one set in ByObject or DefaultNamespaces. DefaultLabelSelector labels.Selector - // DefaultFieldSelector will be used as a field selectors for all object types - // unless they have a more specific selector set in ByObject. + // DefaultFieldSelector will be used as a field selector for all object types + // unless there is already one set in ByObject or DefaultNamespaces. DefaultFieldSelector fields.Selector // DefaultTransform will be used as transform for all object types - // unless they have a more specific transform set in ByObject. + // unless there is already one set in ByObject or DefaultNamespaces. DefaultTransform toolscache.TransformFunc - // ByObject restricts the cache's ListWatch to the desired fields per GVK at the specified object. - ByObject map[client.Object]ByObject + // DefaultWatchErrorHandler will be used to the WatchErrorHandler which is called + // whenever ListAndWatch drops the connection with an error. + // + // After calling this handler, the informer will backoff and retry. + DefaultWatchErrorHandler toolscache.WatchErrorHandler - // UnsafeDisableDeepCopy indicates not to deep copy objects during get or - // list objects for EVERY object. + // DefaultUnsafeDisableDeepCopy is the default for UnsafeDisableDeepCopy + // for everything that doesn't specify this. 
+ // // Be very careful with this, when enabled you must DeepCopy any object before mutating it, // otherwise you will mutate the object in the cache. // - // This is a global setting for all objects, and can be overridden by the ByObject setting. - UnsafeDisableDeepCopy *bool + // This will be used for all object types, unless it is set in ByObject or + // DefaultNamespaces. + DefaultUnsafeDisableDeepCopy *bool + + // ByObject restricts the cache's ListWatch to the desired fields per GVK at the specified object. + // object, this will fall through to Default* settings. + ByObject map[client.Object]ByObject + + // newInformer allows overriding of NewSharedIndexInformer for testing. + newInformer *func(toolscache.ListerWatcher, runtime.Object, time.Duration, toolscache.Indexers) toolscache.SharedIndexInformer } // ByObject offers more fine-grained control over the cache's ListWatch by object. type ByObject struct { + // Namespaces maps a namespace name to cache configs. If set, only the + // namespaces in this map will be cached. + // + // Settings in the map value that are unset will be defaulted. + // Use an empty value for the specific setting to prevent that. + // + // It is possible to have specific Config for just some namespaces + // but cache all namespaces by using the AllNamespaces const as the map key. + // This will then include all namespaces that do not have a more specific + // setting. + // + // A nil map allows to default this to the cache's DefaultNamespaces setting. + // An empty map prevents this and means that all namespaces will be cached. + // + // The defaulting follows the following precedence order: + // 1. ByObject + // 2. DefaultNamespaces[namespace] + // 3. Default* + // + // This must be unset for cluster-scoped objects. + Namespaces map[string]Config + // Label represents a label selector for the object. Label labels.Selector // Field represents a field selector for the object. Field fields.Selector - // Transform is a map from objects to transformer functions which - // get applied when objects of the transformation are about to be committed - // to cache. + // Transform is a transformer function for the object which gets applied + // when objects of the transformation are about to be committed to the cache. // // This function is called both for new objects to enter the cache, // and for updated objects. @@ -191,48 +273,121 @@ type ByObject struct { UnsafeDisableDeepCopy *bool } +// Config describes all potential options for a given watch. +type Config struct { + // LabelSelector specifies a label selector. A nil value allows to + // default this. + // + // Set to labels.Everything() if you don't want this defaulted. + LabelSelector labels.Selector + + // FieldSelector specifics a field selector. A nil value allows to + // default this. + // + // Set to fields.Everything() if you don't want this defaulted. + FieldSelector fields.Selector + + // Transform specifies a transform func. A nil value allows to default + // this. + // + // Set to an empty func to prevent this: + // func(in interface{}) (interface{}, error) { return in, nil } + Transform toolscache.TransformFunc + + // UnsafeDisableDeepCopy specifies if List and Get requests against the + // cache should not DeepCopy. A nil value allows to default this. + UnsafeDisableDeepCopy *bool +} + // NewCacheFunc - Function for creating a new cache from the options and a rest config. type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) // New initializes and returns a new Cache. 
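Editorial aside, not part of the vendored diff: a minimal sketch of how a caller might fill in the reworked cache.Options and Config/ByObject types shown above, assuming controller-runtime v0.16's public API as vendored here. The package name, namespace names, and label values are invented for illustration.

package cachesetup

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// buildCache wires up the new Options fields that replace the removed
// Namespaces []string field.
func buildCache(cfg *rest.Config) (cache.Cache, error) {
	return cache.New(cfg, cache.Options{
		// Cache two namespaces explicitly and everything else through the
		// AllNamespaces entry, which carries its own label selector.
		DefaultNamespaces: map[string]cache.Config{
			"team-a":            {},
			"team-b":            {},
			cache.AllNamespaces: {LabelSelector: labels.SelectorFromSet(labels.Set{"watched": "true"})},
		},
		// Per-type override: only cache Secrets carrying this label; settings
		// left unset here fall through to the Default* values.
		ByObject: map[client.Object]cache.ByObject{
			&corev1.Secret{}: {
				Label: labels.SelectorFromSet(labels.Set{"managed-by": "example"}),
			},
		},
		// Return ErrResourceNotCached instead of starting informers on demand.
		ReaderFailOnMissingInformer: true,
	})
}

The AllNamespaces entry keeps a cluster-wide watch with its own selector while the named namespaces fall back to the defaults, following the precedence order documented in the ByObject.Namespaces comment above.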
-func New(config *rest.Config, opts Options) (Cache, error) { - if len(opts.Namespaces) == 0 { - opts.Namespaces = []string{metav1.NamespaceAll} +func New(cfg *rest.Config, opts Options) (Cache, error) { + opts, err := defaultOpts(cfg, opts) + if err != nil { + return nil, err } - if len(opts.Namespaces) > 1 { - return newMultiNamespaceCache(config, opts) + + newCacheFunc := newCache(cfg, opts) + + var defaultCache Cache + if len(opts.DefaultNamespaces) > 0 { + defaultConfig := optionDefaultsToConfig(&opts) + defaultCache = newMultiNamespaceCache(newCacheFunc, opts.Scheme, opts.Mapper, opts.DefaultNamespaces, &defaultConfig) + } else { + defaultCache = newCacheFunc(optionDefaultsToConfig(&opts), corev1.NamespaceAll) } - opts, err := defaultOpts(config, opts) - if err != nil { - return nil, err + if len(opts.ByObject) == 0 { + return defaultCache, nil } - byGVK, err := convertToInformerOptsByGVK(opts.ByObject, opts.Scheme) - if err != nil { - return nil, err + delegating := &delegatingByGVKCache{ + scheme: opts.Scheme, + caches: make(map[schema.GroupVersionKind]Cache, len(opts.ByObject)), + defaultCache: defaultCache, + } + + for obj, config := range opts.ByObject { + gvk, err := apiutil.GVKForObject(obj, opts.Scheme) + if err != nil { + return nil, fmt.Errorf("failed to get GVK for type %T: %w", obj, err) + } + var cache Cache + if len(config.Namespaces) > 0 { + cache = newMultiNamespaceCache(newCacheFunc, opts.Scheme, opts.Mapper, config.Namespaces, nil) + } else { + cache = newCacheFunc(byObjectToConfig(config), corev1.NamespaceAll) + } + delegating.caches[gvk] = cache } - // Set the default selector and transform. - byGVK[schema.GroupVersionKind{}] = internal.InformersOptsByGVK{ - Selector: internal.Selector{ - Label: opts.DefaultLabelSelector, - Field: opts.DefaultFieldSelector, - }, + + return delegating, nil +} + +func optionDefaultsToConfig(opts *Options) Config { + return Config{ + LabelSelector: opts.DefaultLabelSelector, + FieldSelector: opts.DefaultFieldSelector, Transform: opts.DefaultTransform, - UnsafeDisableDeepCopy: opts.UnsafeDisableDeepCopy, + UnsafeDisableDeepCopy: opts.DefaultUnsafeDisableDeepCopy, + } +} + +func byObjectToConfig(byObject ByObject) Config { + return Config{ + LabelSelector: byObject.Label, + FieldSelector: byObject.Field, + Transform: byObject.Transform, + UnsafeDisableDeepCopy: byObject.UnsafeDisableDeepCopy, } +} - return &informerCache{ - scheme: opts.Scheme, - Informers: internal.NewInformers(config, &internal.InformersOpts{ - HTTPClient: opts.HTTPClient, - Scheme: opts.Scheme, - Mapper: opts.Mapper, - ResyncPeriod: *opts.SyncPeriod, - Namespace: opts.Namespaces[0], - ByGVK: byGVK, - }), - }, nil +type newCacheFunc func(config Config, namespace string) Cache + +func newCache(restConfig *rest.Config, opts Options) newCacheFunc { + return func(config Config, namespace string) Cache { + return &informerCache{ + scheme: opts.Scheme, + Informers: internal.NewInformers(restConfig, &internal.InformersOpts{ + HTTPClient: opts.HTTPClient, + Scheme: opts.Scheme, + Mapper: opts.Mapper, + ResyncPeriod: *opts.SyncPeriod, + Namespace: namespace, + Selector: internal.Selector{ + Label: config.LabelSelector, + Field: config.FieldSelector, + }, + Transform: config.Transform, + WatchErrorHandler: opts.DefaultWatchErrorHandler, + UnsafeDisableDeepCopy: ptr.Deref(config.UnsafeDisableDeepCopy, false), + NewInformer: opts.newInformer, + }), + readerFailOnMissingInformer: opts.ReaderFailOnMissingInformer, + } + } } func defaultOpts(config *rest.Config, opts Options) 
(Options, error) { @@ -241,15 +396,12 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { config.UserAgent = rest.DefaultKubernetesUserAgent() } - logger := log.WithName("setup") - // Use the rest HTTP client for the provided config if unset if opts.HTTPClient == nil { var err error opts.HTTPClient, err = rest.HTTPClientFor(config) if err != nil { - logger.Error(err, "Failed to get HTTP client") - return opts, fmt.Errorf("could not create HTTP client from config: %w", err) + return Options{}, fmt.Errorf("could not create HTTP client from config: %w", err) } } @@ -261,13 +413,81 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { // Construct a new Mapper if unset if opts.Mapper == nil { var err error - opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config, opts.HTTPClient) + opts.Mapper, err = apiutil.NewDynamicRESTMapper(config, opts.HTTPClient) if err != nil { - logger.Error(err, "Failed to get API Group-Resources") - return opts, fmt.Errorf("could not create RESTMapper from config: %w", err) + return Options{}, fmt.Errorf("could not create RESTMapper from config: %w", err) } } + for obj, byObject := range opts.ByObject { + isNamespaced, err := apiutil.IsObjectNamespaced(obj, opts.Scheme, opts.Mapper) + if err != nil { + return opts, fmt.Errorf("failed to determine if %T is namespaced: %w", obj, err) + } + if !isNamespaced && byObject.Namespaces != nil { + return opts, fmt.Errorf("type %T is not namespaced, but its ByObject.Namespaces setting is not nil", obj) + } + + if isNamespaced && byObject.Namespaces == nil { + byObject.Namespaces = maps.Clone(opts.DefaultNamespaces) + } + + // Default the namespace-level configs first, because they need to use the undefaulted type-level config + // to be able to potentially fall through to settings from DefaultNamespaces. + for namespace, config := range byObject.Namespaces { + // 1. Default from the undefaulted type-level config + config = defaultConfig(config, byObjectToConfig(byObject)) + + // 2. Default from the namespace-level config. This was defaulted from the global default config earlier, but + // might not have an entry for the current namespace. + if defaultNamespaceSettings, hasDefaultNamespace := opts.DefaultNamespaces[namespace]; hasDefaultNamespace { + config = defaultConfig(config, defaultNamespaceSettings) + } + + // 3. Default from the global defaults + config = defaultConfig(config, optionDefaultsToConfig(&opts)) + + if namespace == metav1.NamespaceAll { + config.FieldSelector = fields.AndSelectors( + appendIfNotNil( + namespaceAllSelector(maps.Keys(byObject.Namespaces)), + config.FieldSelector, + )..., + ) + } + + byObject.Namespaces[namespace] = config + } + + // Only default ByObject iself if it isn't namespaced or has no namespaces configured, as only + // then any of this will be honored. + if !isNamespaced || len(byObject.Namespaces) == 0 { + defaultedConfig := defaultConfig(byObjectToConfig(byObject), optionDefaultsToConfig(&opts)) + byObject.Label = defaultedConfig.LabelSelector + byObject.Field = defaultedConfig.FieldSelector + byObject.Transform = defaultedConfig.Transform + byObject.UnsafeDisableDeepCopy = defaultedConfig.UnsafeDisableDeepCopy + } + + opts.ByObject[obj] = byObject + } + + // Default namespaces after byObject has been defaulted, otherwise a namespace without selectors + // will get the `Default` selectors, then get copied to byObject and then not get defaulted from + // byObject, as it already has selectors. 
+ for namespace, cfg := range opts.DefaultNamespaces { + cfg = defaultConfig(cfg, optionDefaultsToConfig(&opts)) + if namespace == metav1.NamespaceAll { + cfg.FieldSelector = fields.AndSelectors( + appendIfNotNil( + namespaceAllSelector(maps.Keys(opts.DefaultNamespaces)), + cfg.FieldSelector, + )..., + ) + } + opts.DefaultNamespaces[namespace] = cfg + } + // Default the resync period to 10 hours if unset if opts.SyncPeriod == nil { opts.SyncPeriod = &defaultSyncPeriod @@ -275,24 +495,38 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { return opts, nil } -func convertToInformerOptsByGVK(in map[client.Object]ByObject, scheme *runtime.Scheme) (map[schema.GroupVersionKind]internal.InformersOptsByGVK, error) { - out := map[schema.GroupVersionKind]internal.InformersOptsByGVK{} - for object, byObject := range in { - gvk, err := apiutil.GVKForObject(object, scheme) - if err != nil { - return nil, err - } - if _, ok := out[gvk]; ok { - return nil, fmt.Errorf("duplicate cache options for GVK %v, cache.Options.ByObject has multiple types with the same GroupVersionKind", gvk) - } - out[gvk] = internal.InformersOptsByGVK{ - Selector: internal.Selector{ - Field: byObject.Field, - Label: byObject.Label, - }, - Transform: byObject.Transform, - UnsafeDisableDeepCopy: byObject.UnsafeDisableDeepCopy, +func defaultConfig(toDefault, defaultFrom Config) Config { + if toDefault.LabelSelector == nil { + toDefault.LabelSelector = defaultFrom.LabelSelector + } + if toDefault.FieldSelector == nil { + toDefault.FieldSelector = defaultFrom.FieldSelector + } + if toDefault.Transform == nil { + toDefault.Transform = defaultFrom.Transform + } + if toDefault.UnsafeDisableDeepCopy == nil { + toDefault.UnsafeDisableDeepCopy = defaultFrom.UnsafeDisableDeepCopy + } + + return toDefault +} + +func namespaceAllSelector(namespaces []string) []fields.Selector { + selectors := make([]fields.Selector, 0, len(namespaces)-1) + sort.Strings(namespaces) + for _, namespace := range namespaces { + if namespace != metav1.NamespaceAll { + selectors = append(selectors, fields.OneTermNotEqualSelector("metadata.namespace", namespace)) } } - return out, nil + + return selectors +} + +func appendIfNotNil[T comparable](a []T, b T) []T { + if b != *new(T) { + return append(a, b) + } + return a } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/delegating_by_gvk_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/delegating_by_gvk_cache.go new file mode 100644 index 0000000000..4db8208a63 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/delegating_by_gvk_cache.go @@ -0,0 +1,135 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "strings" + "sync" + + "golang.org/x/exp/maps" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// delegatingByGVKCache delegates to a type-specific cache if present +// and uses the defaultCache otherwise. +type delegatingByGVKCache struct { + scheme *runtime.Scheme + caches map[schema.GroupVersionKind]Cache + defaultCache Cache +} + +func (dbt *delegatingByGVKCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + cache, err := dbt.cacheForObject(obj) + if err != nil { + return err + } + return cache.Get(ctx, key, obj, opts...) +} + +func (dbt *delegatingByGVKCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + cache, err := dbt.cacheForObject(list) + if err != nil { + return err + } + return cache.List(ctx, list, opts...) +} + +func (dbt *delegatingByGVKCache) RemoveInformer(ctx context.Context, obj client.Object) error { + cache, err := dbt.cacheForObject(obj) + if err != nil { + return err + } + return cache.RemoveInformer(ctx, obj) +} + +func (dbt *delegatingByGVKCache) GetInformer(ctx context.Context, obj client.Object, opts ...InformerGetOption) (Informer, error) { + cache, err := dbt.cacheForObject(obj) + if err != nil { + return nil, err + } + return cache.GetInformer(ctx, obj, opts...) +} + +func (dbt *delegatingByGVKCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error) { + return dbt.cacheForGVK(gvk).GetInformerForKind(ctx, gvk, opts...) +} + +func (dbt *delegatingByGVKCache) Start(ctx context.Context) error { + allCaches := maps.Values(dbt.caches) + allCaches = append(allCaches, dbt.defaultCache) + + wg := &sync.WaitGroup{} + errs := make(chan error) + for idx := range allCaches { + cache := allCaches[idx] + wg.Add(1) + go func() { + defer wg.Done() + if err := cache.Start(ctx); err != nil { + errs <- err + } + }() + } + + select { + case err := <-errs: + return err + case <-ctx.Done(): + wg.Wait() + return nil + } +} + +func (dbt *delegatingByGVKCache) WaitForCacheSync(ctx context.Context) bool { + synced := true + for _, cache := range append(maps.Values(dbt.caches), dbt.defaultCache) { + if !cache.WaitForCacheSync(ctx) { + synced = false + } + } + + return synced +} + +func (dbt *delegatingByGVKCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + cache, err := dbt.cacheForObject(obj) + if err != nil { + return err + } + return cache.IndexField(ctx, obj, field, extractValue) +} + +func (dbt *delegatingByGVKCache) cacheForObject(o runtime.Object) (Cache, error) { + gvk, err := apiutil.GVKForObject(o, dbt.scheme) + if err != nil { + return nil, err + } + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + return dbt.cacheForGVK(gvk), nil +} + +func (dbt *delegatingByGVKCache) cacheForGVK(gvk schema.GroupVersionKind) Cache { + if specific, hasSpecific := dbt.caches[gvk]; hasSpecific { + return specific + } + + return dbt.defaultCache +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index 771244d52a..091667b7fa 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -27,6 +27,7 @@ import 
( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/internal" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -45,11 +46,28 @@ func (*ErrCacheNotStarted) Error() string { return "the cache is not started, can not read objects" } +var _ error = (*ErrCacheNotStarted)(nil) + +// ErrResourceNotCached indicates that the resource type +// the client asked the cache for is not cached, i.e. the +// corresponding informer does not exist yet. +type ErrResourceNotCached struct { + GVK schema.GroupVersionKind +} + +// Error returns the error +func (r ErrResourceNotCached) Error() string { + return fmt.Sprintf("%s is not cached", r.GVK.String()) +} + +var _ error = (*ErrResourceNotCached)(nil) + // informerCache is a Kubernetes Object cache populated from internal.Informers. // informerCache wraps internal.Informers. type informerCache struct { scheme *runtime.Scheme *internal.Informers + readerFailOnMissingInformer bool } // Get implements Reader. @@ -59,7 +77,7 @@ func (ic *informerCache) Get(ctx context.Context, key client.ObjectKey, out clie return err } - started, cache, err := ic.Informers.Get(ctx, gvk, out) + started, cache, err := ic.getInformerForKind(ctx, gvk, out) if err != nil { return err } @@ -67,7 +85,7 @@ func (ic *informerCache) Get(ctx context.Context, key client.ObjectKey, out clie if !started { return &ErrCacheNotStarted{} } - return cache.Reader.Get(ctx, key, out) + return cache.Reader.Get(ctx, key, out, opts...) } // List implements Reader. @@ -77,7 +95,7 @@ func (ic *informerCache) List(ctx context.Context, out client.ObjectList, opts . return err } - started, cache, err := ic.Informers.Get(ctx, *gvk, cacheTypeObj) + started, cache, err := ic.getInformerForKind(ctx, *gvk, cacheTypeObj) if err != nil { return err } @@ -123,33 +141,64 @@ func (ic *informerCache) objectTypeForListObject(list client.ObjectList) (*schem return &gvk, cacheTypeObj, nil } -// GetInformerForKind returns the informer for the GroupVersionKind. -func (ic *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { +func applyGetOptions(opts ...InformerGetOption) *internal.GetOptions { + cfg := &InformerGetOptions{} + for _, opt := range opts { + opt(cfg) + } + return (*internal.GetOptions)(cfg) +} + +// GetInformerForKind returns the informer for the GroupVersionKind. If no informer exists, one will be started. +func (ic *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error) { // Map the gvk to an object obj, err := ic.scheme.New(gvk) if err != nil { return nil, err } - _, i, err := ic.Informers.Get(ctx, gvk, obj) + _, i, err := ic.Informers.Get(ctx, gvk, obj, applyGetOptions(opts...)) if err != nil { return nil, err } - return i.Informer, err + return i.Informer, nil } -// GetInformer returns the informer for the obj. -func (ic *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { +// GetInformer returns the informer for the obj. If no informer exists, one will be started. 
+func (ic *informerCache) GetInformer(ctx context.Context, obj client.Object, opts ...InformerGetOption) (Informer, error) { gvk, err := apiutil.GVKForObject(obj, ic.scheme) if err != nil { return nil, err } - _, i, err := ic.Informers.Get(ctx, gvk, obj) + _, i, err := ic.Informers.Get(ctx, gvk, obj, applyGetOptions(opts...)) if err != nil { return nil, err } - return i.Informer, err + return i.Informer, nil +} + +func (ic *informerCache) getInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *internal.Cache, error) { + if ic.readerFailOnMissingInformer { + cache, started, ok := ic.Informers.Peek(gvk, obj) + if !ok { + return false, nil, &ErrResourceNotCached{GVK: gvk} + } + return started, cache, nil + } + + return ic.Informers.Get(ctx, gvk, obj, &internal.GetOptions{}) +} + +// RemoveInformer deactivates and removes the informer from the cache. +func (ic *informerCache) RemoveInformer(_ context.Context, obj client.Object) error { + gvk, err := apiutil.GVKForObject(obj, ic.scheme) + if err != nil { + return err + } + + ic.Informers.Remove(gvk, obj) + return nil } // NeedLeaderElection implements the LeaderElectionRunnable interface @@ -158,11 +207,11 @@ func (ic *informerCache) NeedLeaderElection() bool { return false } -// IndexField adds an indexer to the underlying cache, using extraction function to get -// value(s) from the given field. This index can then be used by passing a field selector +// IndexField adds an indexer to the underlying informer, using extractValue function to get +// value(s) from the given field. This index can then be used by passing a field selector // to List. For one-to-one compatibility with "normal" field selectors, only return one value. -// The values may be anything. They will automatically be prefixed with the namespace of the -// given object, if present. The objects passed are guaranteed to be objects of the correct type. +// The values may be anything. They will automatically be prefixed with the namespace of the +// given object, if present. The objects passed are guaranteed to be objects of the correct type. func (ic *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { informer, err := ic.GetInformer(ctx, obj) if err != nil { @@ -171,7 +220,7 @@ func (ic *informerCache) IndexField(ctx context.Context, obj client.Object, fiel return indexByField(informer, field, extractValue) } -func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error { +func indexByField(informer Informer, field string, extractValue client.IndexerFunc) error { indexFunc := func(objRaw interface{}) ([]string, error) { // TODO(directxman12): check if this is the correct type? 
obj, isObj := objRaw.(client.Object) @@ -184,7 +233,7 @@ func indexByField(indexer Informer, field string, extractor client.IndexerFunc) } ns := meta.GetNamespace() - rawVals := extractor(obj) + rawVals := extractValue(obj) var vals []string if ns == "" { // if we're not doubling the keys for the namespaced case, just create a new slice with same length @@ -207,5 +256,5 @@ func indexByField(indexer Informer, field string, extractor client.IndexerFunc) return vals, nil } - return indexer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc}) + return informer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc}) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index 3c8355bbde..2e4f5ce527 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -23,6 +23,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -53,7 +54,7 @@ type CacheReader struct { } // Get checks the indexer for the object and writes a copy of it if found. -func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { +func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object, _ ...client.GetOption) error { if c.scopeName == apimeta.RESTScopeNameRoot { key.Namespace = "" } @@ -67,9 +68,9 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Ob // Not found, return an error if !exists { - // Resource gets transformed into Kind in the error anyway, so this is fine return apierrors.NewNotFound(schema.GroupResource{ - Group: c.groupVersionKind.Group, + Group: c.groupVersionKind.Group, + // Resource gets set as Kind in the error so this is fine Resource: c.groupVersionKind.Kind, }, key.Name) } @@ -111,18 +112,20 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) + if listOpts.Continue != "" { + return fmt.Errorf("continue list option is not supported by the cache") + } + switch { case listOpts.FieldSelector != nil: - // TODO(directxman12): support more complicated field selectors by - // combining multiple indices, GetIndexers, etc - field, val, requiresExact := selector.RequiresExactMatch(listOpts.FieldSelector) + requiresExact := selector.RequiresExactMatch(listOpts.FieldSelector) if !requiresExact { return fmt.Errorf("non-exact field matches are not supported by the cache") } - // list all objects by the field selector. If this is namespaced and we have one, ask for the - // namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" + // list all objects by the field selector. If this is namespaced and we have one, ask for the + // namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" // namespace. 
- objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val)) + objs, err = byIndexes(c.indexer, listOpts.FieldSelector.Requirements(), listOpts.Namespace) case listOpts.Namespace != "": objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace) default: @@ -174,8 +177,56 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli return apimeta.SetList(out, runtimeObjs) } +func byIndexes(indexer cache.Indexer, requires fields.Requirements, namespace string) ([]interface{}, error) { + var ( + err error + objs []interface{} + vals []string + ) + indexers := indexer.GetIndexers() + for idx, req := range requires { + indexName := FieldIndexName(req.Field) + indexedValue := KeyToNamespacedKey(namespace, req.Value) + if idx == 0 { + // we use first require to get snapshot data + // TODO(halfcrazy): use complicated index when client-go provides byIndexes + // https://github.com/kubernetes/kubernetes/issues/109329 + objs, err = indexer.ByIndex(indexName, indexedValue) + if err != nil { + return nil, err + } + if len(objs) == 0 { + return nil, nil + } + continue + } + fn, exist := indexers[indexName] + if !exist { + return nil, fmt.Errorf("index with name %s does not exist", indexName) + } + filteredObjects := make([]interface{}, 0, len(objs)) + for _, obj := range objs { + vals, err = fn(obj) + if err != nil { + return nil, err + } + for _, val := range vals { + if val == indexedValue { + filteredObjects = append(filteredObjects, obj) + break + } + } + } + if len(filteredObjects) == 0 { + return nil, nil + } + objs = filteredObjects + } + return objs, nil +} + // objectKeyToStorageKey converts an object key to store key. -// It's akin to MetaNamespaceKeyFunc. It's separate from +// It's akin to MetaNamespaceKeyFunc. It's separate from // String to allow keeping the key format easily in sync with // MetaNamespaceKeyFunc. func objectKeyToStoreKey(k client.ObjectKey) string { @@ -191,7 +242,7 @@ func FieldIndexName(field string) string { return "field:" + field } -// noNamespaceNamespace is used as the "namespace" when we want to list across all namespaces. +// allNamespacesNamespace is used as the "namespace" when we want to list across all namespaces. const allNamespacesNamespace = "__all_namespaces" // KeyToNamespacedKey prefixes the given index key with a namespace diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go index 09e0111114..c270e809ca 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go @@ -36,28 +36,29 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/internal/syncs" ) // InformersOpts configures an InformerMap. type InformersOpts struct { - HTTPClient *http.Client - Scheme *runtime.Scheme - Mapper meta.RESTMapper - ResyncPeriod time.Duration - Namespace string - ByGVK map[schema.GroupVersionKind]InformersOptsByGVK -} - -// InformersOptsByGVK configured additional by group version kind (or object) -// in an InformerMap. 
-type InformersOptsByGVK struct { + HTTPClient *http.Client + Scheme *runtime.Scheme + Mapper meta.RESTMapper + ResyncPeriod time.Duration + Namespace string + NewInformer *func(cache.ListerWatcher, runtime.Object, time.Duration, cache.Indexers) cache.SharedIndexInformer Selector Selector Transform cache.TransformFunc - UnsafeDisableDeepCopy *bool + UnsafeDisableDeepCopy bool + WatchErrorHandler cache.WatchErrorHandler } // NewInformers creates a new InformersMap that can create informers under the hood. func NewInformers(config *rest.Config, options *InformersOpts) *Informers { + newInformer := cache.NewSharedIndexInformer + if options.NewInformer != nil { + newInformer = *options.NewInformer + } return &Informers{ config: config, httpClient: options.HTTPClient, @@ -68,12 +69,16 @@ func NewInformers(config *rest.Config, options *InformersOpts) *Informers { Unstructured: make(map[schema.GroupVersionKind]*Cache), Metadata: make(map[schema.GroupVersionKind]*Cache), }, - codecs: serializer.NewCodecFactory(options.Scheme), - paramCodec: runtime.NewParameterCodec(options.Scheme), - resync: options.ResyncPeriod, - startWait: make(chan struct{}), - namespace: options.Namespace, - byGVK: options.ByGVK, + codecs: serializer.NewCodecFactory(options.Scheme), + paramCodec: runtime.NewParameterCodec(options.Scheme), + resync: options.ResyncPeriod, + startWait: make(chan struct{}), + namespace: options.Namespace, + selector: options.Selector, + transform: options.Transform, + unsafeDisableDeepCopy: options.UnsafeDisableDeepCopy, + newInformer: newInformer, + watchErrorHandler: options.WatchErrorHandler, } } @@ -84,6 +89,20 @@ type Cache struct { // CacheReader wraps Informer and implements the CacheReader interface for a single type Reader CacheReader + + // Stop can be used to stop this individual informer. + stop chan struct{} +} + +// Start starts the informer managed by a MapEntry. +// Blocks until the informer stops. The informer can be stopped +// either individually (via the entry's stop channel) or globally +// via the provided stop argument. +func (c *Cache) Start(stop <-chan struct{}) { + // Stop on either the whole map stopping or just this informer being removed. + internalStop, cancel := syncs.MergeChans(stop, c.stop) + defer cancel() + c.Informer.Run(internalStop) } type tracker struct { @@ -92,6 +111,13 @@ type tracker struct { Metadata map[schema.GroupVersionKind]*Cache } +// GetOptions provides configuration to customize the behavior when +// getting an informer. +type GetOptions struct { + // BlockUntilSynced controls if the informer retrieval will block until the informer is synced. Defaults to `true`. + BlockUntilSynced *bool +} + // Informers create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. // It uses a standard parameter codec constructed based on the given generated Scheme. type Informers struct { @@ -144,49 +170,20 @@ type Informers struct { // default or empty string means all namespaces namespace string - byGVK map[schema.GroupVersionKind]InformersOptsByGVK -} + selector Selector + transform cache.TransformFunc + unsafeDisableDeepCopy bool -func (ip *Informers) getSelector(gvk schema.GroupVersionKind) Selector { - if ip.byGVK == nil { - return Selector{} - } - if res, ok := ip.byGVK[gvk]; ok { - return res.Selector - } - if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok { - return res.Selector - } - return Selector{} -} + // NewInformer allows overriding of the shared index informer constructor for testing. 
+ newInformer func(cache.ListerWatcher, runtime.Object, time.Duration, cache.Indexers) cache.SharedIndexInformer -func (ip *Informers) getTransform(gvk schema.GroupVersionKind) cache.TransformFunc { - if ip.byGVK == nil { - return nil - } - if res, ok := ip.byGVK[gvk]; ok { - return res.Transform - } - if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok { - return res.Transform - } - return nil + // WatchErrorHandler allows the shared index informer's + // watchErrorHandler to be set by overriding the options + // or to use the default watchErrorHandler + watchErrorHandler cache.WatchErrorHandler } -func (ip *Informers) getDisableDeepCopy(gvk schema.GroupVersionKind) bool { - if ip.byGVK == nil { - return false - } - if res, ok := ip.byGVK[gvk]; ok && res.UnsafeDisableDeepCopy != nil { - return *res.UnsafeDisableDeepCopy - } - if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok && res.UnsafeDisableDeepCopy != nil { - return *res.UnsafeDisableDeepCopy - } - return false -} - -// Start calls Run on each of the informers and sets started to true. Blocks on the context. +// Start calls Run on each of the informers and sets started to true. Blocks on the context. // It doesn't return start because it can't return an error, and it's not a runnable directly. func (ip *Informers) Start(ctx context.Context) error { func() { @@ -198,13 +195,13 @@ func (ip *Informers) Start(ctx context.Context) error { // Start each informer for _, i := range ip.tracker.Structured { - ip.startInformerLocked(i.Informer) + ip.startInformerLocked(i) } for _, i := range ip.tracker.Unstructured { - ip.startInformerLocked(i.Informer) + ip.startInformerLocked(i) } for _, i := range ip.tracker.Metadata { - ip.startInformerLocked(i.Informer) + ip.startInformerLocked(i) } // Set started to true so we immediately start any informers added later. @@ -219,7 +216,7 @@ func (ip *Informers) Start(ctx context.Context) error { return nil } -func (ip *Informers) startInformerLocked(informer cache.SharedIndexInformer) { +func (ip *Informers) startInformerLocked(cacheEntry *Cache) { // Don't start the informer in case we are already waiting for the items in // the waitGroup to finish, since waitGroups don't support waiting and adding // at the same time. @@ -230,7 +227,7 @@ func (ip *Informers) startInformerLocked(informer cache.SharedIndexInformer) { ip.waitGroup.Add(1) go func() { defer ip.waitGroup.Done() - informer.Run(ip.ctx.Done()) + cacheEntry.Start(ip.ctx.Done()) }() } @@ -271,18 +268,19 @@ func (ip *Informers) WaitForCacheSync(ctx context.Context) bool { return cache.WaitForCacheSync(ctx.Done(), ip.getHasSyncedFuncs()...) } -func (ip *Informers) get(gvk schema.GroupVersionKind, obj runtime.Object) (res *Cache, started bool, ok bool) { +// Peek attempts to get the informer for the GVK, but does not start one if one does not exist. +func (ip *Informers) Peek(gvk schema.GroupVersionKind, obj runtime.Object) (res *Cache, started bool, ok bool) { ip.mu.RLock() defer ip.mu.RUnlock() i, ok := ip.informersByType(obj)[gvk] return i, ip.started, ok } -// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns +// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns // the Informer from the map. 
-func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *Cache, error) { +func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object, opts *GetOptions) (bool, *Cache, error) { // Return the informer if it is found - i, started, ok := ip.get(gvk, obj) + i, started, ok := ip.Peek(gvk, obj) if !ok { var err error if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { @@ -290,7 +288,12 @@ func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj r } } - if started && !i.Informer.HasSynced() { + shouldBlock := true + if opts.BlockUntilSynced != nil { + shouldBlock = *opts.BlockUntilSynced + } + + if shouldBlock && started && !i.Informer.HasSynced() { // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) @@ -300,6 +303,21 @@ func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj r return started, i, nil } +// Remove removes an informer entry and stops it if it was running. +func (ip *Informers) Remove(gvk schema.GroupVersionKind, obj runtime.Object) { + ip.mu.Lock() + defer ip.mu.Unlock() + + informerMap := ip.informersByType(obj) + + entry, ok := informerMap[gvk] + if !ok { + return + } + close(entry.stop) + delete(informerMap, gvk) +} + func (ip *Informers) informersByType(obj runtime.Object) map[schema.GroupVersionKind]*Cache { switch obj.(type) { case runtime.Unstructured: @@ -311,11 +329,12 @@ func (ip *Informers) informersByType(obj runtime.Object) map[schema.GroupVersion } } +// addInformerToMap either returns an existing informer or creates a new informer, adds it to the map and returns it. func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*Cache, bool, error) { ip.mu.Lock() defer ip.mu.Unlock() - // Check the cache to see if we already have an Informer. If we do, return the Informer. + // Check the cache to see if we already have an Informer. If we do, return the Informer. // This is for the case where 2 routines tried to get the informer when it wasn't in the map // so neither returned early, but the first one created it. 
if i, ok := ip.informersByType(obj)[gvk]; ok { @@ -327,13 +346,13 @@ func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.O if err != nil { return nil, false, err } - sharedIndexInformer := cache.NewSharedIndexInformer(&cache.ListWatch{ + sharedIndexInformer := ip.newInformer(&cache.ListWatch{ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.getSelector(gvk).ApplyToList(&opts) + ip.selector.ApplyToList(&opts) return listWatcher.ListFunc(opts) }, WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.getSelector(gvk).ApplyToList(&opts) + ip.selector.ApplyToList(&opts) opts.Watch = true // Watch needs to be set to true separately return listWatcher.WatchFunc(opts) }, @@ -341,8 +360,15 @@ func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.O cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, }) + // Set WatchErrorHandler on SharedIndexInformer if set + if ip.watchErrorHandler != nil { + if err := sharedIndexInformer.SetWatchErrorHandler(ip.watchErrorHandler); err != nil { + return nil, false, err + } + } + // Check to see if there is a transformer for this gvk - if err := sharedIndexInformer.SetTransform(ip.getTransform(gvk)); err != nil { + if err := sharedIndexInformer.SetTransform(ip.transform); err != nil { return nil, false, err } @@ -358,15 +384,16 @@ func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.O indexer: sharedIndexInformer.GetIndexer(), groupVersionKind: gvk, scopeName: mapping.Scope.Name(), - disableDeepCopy: ip.getDisableDeepCopy(gvk), + disableDeepCopy: ip.unsafeDisableDeepCopy, }, + stop: make(chan struct{}), } ip.informersByType(obj)[gvk] = i // Start the informer in case the InformersMap has started, otherwise it will be // started when the InformersMap starts. if ip.started { - ip.startInformerLocked(i.Informer) + ip.startInformerLocked(i) } return i, ip.started, nil } @@ -382,7 +409,7 @@ func (ip *Informers) makeListWatcher(gvk schema.GroupVersionKind, obj runtime.Ob // Figure out if the GVK we're dealing with is global, or namespace scoped. var namespace string if mapping.Scope.Name() == meta.RESTScopeNameNamespace { - namespace = restrictNamespaceBySelector(ip.namespace, ip.getSelector(gvk)) + namespace = restrictNamespaceBySelector(ip.namespace, ip.selector) } switch obj.(type) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go deleted file mode 100644 index 0725f550c5..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go +++ /dev/null @@ -1,55 +0,0 @@ -package internal - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// TransformFuncByGVK provides access to the correct transform function for -// any given GVK. -type TransformFuncByGVK interface { - Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error - Get(schema.GroupVersionKind) cache.TransformFunc - SetDefault(transformer cache.TransformFunc) -} - -type transformFuncByGVK struct { - defaultTransform cache.TransformFunc - transformers map[schema.GroupVersionKind]cache.TransformFunc -} - -// TransformFuncByGVKFromMap creates a TransformFuncByGVK from a map that -// maps GVKs to TransformFuncs. 
-func TransformFuncByGVKFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByGVK { - byGVK := &transformFuncByGVK{} - if defaultFunc, hasDefault := in[schema.GroupVersionKind{}]; hasDefault { - byGVK.defaultTransform = defaultFunc - } - delete(in, schema.GroupVersionKind{}) - byGVK.transformers = in - return byGVK -} - -func (t *transformFuncByGVK) SetDefault(transformer cache.TransformFunc) { - t.defaultTransform = transformer -} - -func (t *transformFuncByGVK) Set(obj runtime.Object, scheme *runtime.Scheme, transformer cache.TransformFunc) error { - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return err - } - - t.transformers[gvk] = transformer - return nil -} - -func (t transformFuncByGVK) Get(gvk schema.GroupVersionKind) cache.TransformFunc { - if val, ok := t.transformers[gvk]; ok { - return val - } - return t.defaultTransform -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index ac97beae94..e38da1455c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -23,10 +23,11 @@ import ( corev1 "k8s.io/api/core/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) @@ -34,49 +35,31 @@ import ( // a new global namespaced cache to handle cluster scoped resources. const globalCache = "_cluster-scope" -// MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache. -// This will scope the cache to a list of namespaces. Listing for all namespaces -// will list for all the namespaces that this knows about. By default this will create -// a global cache for cluster scoped resource. Note that this is not intended -// to be used for excluding namespaces, this is better done via a Predicate. Also note that -// you may face performance issues when using this with a high number of namespaces. -// -// Deprecated: Use cache.Options.Namespaces instead. -func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { - return func(config *rest.Config, opts Options) (Cache, error) { - opts.Namespaces = namespaces - return newMultiNamespaceCache(config, opts) - } -} - -func newMultiNamespaceCache(config *rest.Config, opts Options) (Cache, error) { - if len(opts.Namespaces) < 2 { - return nil, fmt.Errorf("must specify more than one namespace to use multi-namespace cache") - } - opts, err := defaultOpts(config, opts) - if err != nil { - return nil, err - } - +func newMultiNamespaceCache( + newCache newCacheFunc, + scheme *runtime.Scheme, + restMapper apimeta.RESTMapper, + namespaces map[string]Config, + globalConfig *Config, // may be nil in which case no cache for cluster-scoped objects will be created +) Cache { // Create every namespace cache. caches := map[string]Cache{} - for _, ns := range opts.Namespaces { - opts.Namespaces = []string{ns} - c, err := New(config, opts) - if err != nil { - return nil, err - } - caches[ns] = c + for namespace, config := range namespaces { + caches[namespace] = newCache(config, namespace) } - // Create a cache for cluster scoped resources. 
- opts.Namespaces = []string{} - gCache, err := New(config, opts) - if err != nil { - return nil, fmt.Errorf("error creating global cache: %w", err) + // Create a cache for cluster scoped resources if requested + var clusterCache Cache + if globalConfig != nil { + clusterCache = newCache(*globalConfig, corev1.NamespaceAll) } - return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil + return &multiNamespaceCache{ + namespaceToCache: caches, + Scheme: scheme, + RESTMapper: restMapper, + clusterCache: clusterCache, + } } // multiNamespaceCache knows how to handle multiple namespaced caches @@ -84,90 +67,117 @@ func newMultiNamespaceCache(config *rest.Config, opts Options) (Cache, error) { // operator to a list of namespaces instead of watching every namespace // in the cluster. type multiNamespaceCache struct { - namespaceToCache map[string]Cache Scheme *runtime.Scheme RESTMapper apimeta.RESTMapper + namespaceToCache map[string]Cache clusterCache Cache } var _ Cache = &multiNamespaceCache{} // Methods for multiNamespaceCache to conform to the Informers interface. -func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { - informers := map[string]Informer{} - // If the object is clusterscoped, get the informer from clusterCache, +func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object, opts ...InformerGetOption) (Informer, error) { + // If the object is cluster scoped, get the informer from clusterCache, // if not use the namespaced caches. isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { return nil, err } if !isNamespaced { - clusterCacheInf, err := c.clusterCache.GetInformer(ctx, obj) + clusterCacheInformer, err := c.clusterCache.GetInformer(ctx, obj, opts...) if err != nil { return nil, err } - informers[globalCache] = clusterCacheInf - return &multiNamespaceInformer{namespaceToInformer: informers}, nil + return &multiNamespaceInformer{ + namespaceToInformer: map[string]Informer{ + globalCache: clusterCacheInformer, + }, + }, nil } + namespaceToInformer := map[string]Informer{} for ns, cache := range c.namespaceToCache { - informer, err := cache.GetInformer(ctx, obj) + informer, err := cache.GetInformer(ctx, obj, opts...) if err != nil { return nil, err } - informers[ns] = informer + namespaceToInformer[ns] = informer } - return &multiNamespaceInformer{namespaceToInformer: informers}, nil + return &multiNamespaceInformer{namespaceToInformer: namespaceToInformer}, nil } -func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { - informers := map[string]Informer{} - +func (c *multiNamespaceCache) RemoveInformer(ctx context.Context, obj client.Object) error { // If the object is clusterscoped, get the informer from clusterCache, // if not use the namespaced caches. + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + if !isNamespaced { + return c.clusterCache.RemoveInformer(ctx, obj) + } + + for _, cache := range c.namespaceToCache { + err := cache.RemoveInformer(ctx, obj) + if err != nil { + return err + } + } + + return nil +} + +func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error) { + // If the object is cluster scoped, get the informer from clusterCache, + // if not use the namespaced caches. 
isNamespaced, err := apiutil.IsGVKNamespaced(gvk, c.RESTMapper) if err != nil { return nil, err } if !isNamespaced { - clusterCacheInf, err := c.clusterCache.GetInformerForKind(ctx, gvk) + clusterCacheInformer, err := c.clusterCache.GetInformerForKind(ctx, gvk, opts...) if err != nil { return nil, err } - informers[globalCache] = clusterCacheInf - return &multiNamespaceInformer{namespaceToInformer: informers}, nil + return &multiNamespaceInformer{ + namespaceToInformer: map[string]Informer{ + globalCache: clusterCacheInformer, + }, + }, nil } + namespaceToInformer := map[string]Informer{} for ns, cache := range c.namespaceToCache { - informer, err := cache.GetInformerForKind(ctx, gvk) + informer, err := cache.GetInformerForKind(ctx, gvk, opts...) if err != nil { return nil, err } - informers[ns] = informer + namespaceToInformer[ns] = informer } - return &multiNamespaceInformer{namespaceToInformer: informers}, nil + return &multiNamespaceInformer{namespaceToInformer: namespaceToInformer}, nil } func (c *multiNamespaceCache) Start(ctx context.Context) error { // start global cache - go func() { - err := c.clusterCache.Start(ctx) - if err != nil { - log.Error(err, "cluster scoped cache failed to start") - } - }() + if c.clusterCache != nil { + go func() { + err := c.clusterCache.Start(ctx) + if err != nil { + log.Error(err, "cluster scoped cache failed to start") + } + }() + } // start namespaced caches for ns, cache := range c.namespaceToCache { go func(ns string, cache Cache) { - err := cache.Start(ctx) - if err != nil { - log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns) + if err := cache.Start(ctx); err != nil { + log.Error(err, "multi-namespace cache failed to start namespaced informer", "namespace", ns) } }(ns, cache) } @@ -179,13 +189,13 @@ func (c *multiNamespaceCache) Start(ctx context.Context) error { func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { synced := true for _, cache := range c.namespaceToCache { - if s := cache.WaitForCacheSync(ctx); !s { - synced = s + if !cache.WaitForCacheSync(ctx) { + synced = false } } // check if cluster scoped cache has synced - if !c.clusterCache.WaitForCacheSync(ctx) { + if c.clusterCache != nil && !c.clusterCache.WaitForCacheSync(ctx) { synced = false } return synced @@ -222,9 +232,12 @@ func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj cache, ok := c.namespaceToCache[key.Namespace] if !ok { + if global, hasGlobal := c.namespaceToCache[metav1.NamespaceAll]; hasGlobal { + return global.Get(ctx, key, obj, opts...) + } return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key) } - return cache.Get(ctx, key, obj) + return cache.Get(ctx, key, obj, opts...) } // List multi namespace cache will get all the objects in the namespaces that the cache is watching if asked for all namespaces. @@ -245,7 +258,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, if listOpts.Namespace != corev1.NamespaceAll { cache, ok := c.namespaceToCache[listOpts.Namespace] if !ok { - return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", listOpts.Namespace) + return fmt.Errorf("unable to list: %v because of unknown namespace for the cache", listOpts.Namespace) } return cache.List(ctx, list, opts...) 
} @@ -278,12 +291,14 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, return fmt.Errorf("object: %T must be a list type", list) } allItems = append(allItems, items...) + // The last list call should have the most correct resource version. resourceVersion = accessor.GetResourceVersion() if limitSet { // decrement Limit by the number of items // fetched from the current namespace. listOpts.Limit -= int64(len(items)) + // if a Limit was set and the number of // items read has reached this set limit, // then stop reading. @@ -325,9 +340,12 @@ func (h handlerRegistration) HasSynced() bool { var _ Informer = &multiNamespaceInformer{} -// AddEventHandler adds the handler to each namespaced informer. +// AddEventHandler adds the handler to each informer. func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) { - handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} + handles := handlerRegistration{ + handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)), + } + for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandler(handler) if err != nil { @@ -335,12 +353,16 @@ func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEven } handles.handles[ns] = registration } + return handles, nil } // AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) { - handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} + handles := handlerRegistration{ + handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)), + } + for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) if err != nil { @@ -348,14 +370,15 @@ func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolsca } handles.handles[ns] = registration } + return handles, nil } -// RemoveEventHandler removes a formerly added event handler given by its registration handle. +// RemoveEventHandler removes a previously added event handler given by its registration handle. func (i *multiNamespaceInformer) RemoveEventHandler(h toolscache.ResourceEventHandlerRegistration) error { handles, ok := h.(handlerRegistration) if !ok { - return fmt.Errorf("it is not the registration returned by multiNamespaceInformer") + return fmt.Errorf("registration is not a registration returned by multiNamespaceInformer") } for ns, informer := range i.namespaceToInformer { registration, ok := handles.handles[ns] @@ -369,7 +392,7 @@ func (i *multiNamespaceInformer) RemoveEventHandler(h toolscache.ResourceEventHa return nil } -// AddIndexers adds the indexer for each namespaced informer. +// AddIndexers adds the indexers to each informer. 
func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error { for _, informer := range i.namespaceToInformer { err := informer.AddIndexers(indexers) @@ -380,11 +403,21 @@ func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error return nil } -// HasSynced checks if each namespaced informer has synced. +// HasSynced checks if each informer has synced. func (i *multiNamespaceInformer) HasSynced() bool { for _, informer := range i.namespaceToInformer { - if ok := informer.HasSynced(); !ok { - return ok + if !informer.HasSynced() { + return false + } + } + return true +} + +// IsStopped checks if each namespaced informer has stopped, returns false if any are still running. +func (i *multiNamespaceInformer) IsStopped() bool { + for _, informer := range i.namespaceToInformer { + if stopped := informer.IsStopped(); !stopped { + return false } } return true diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go index 2b9b60d8d7..fe15fc0dd7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go @@ -173,14 +173,14 @@ func (cw *CertWatcher) ReadCertificate() error { func (cw *CertWatcher) handleEvent(event fsnotify.Event) { // Only care about events which may modify the contents of the file. - if !(isWrite(event) || isRemove(event) || isCreate(event)) { + if !(isWrite(event) || isRemove(event) || isCreate(event) || isChmod(event)) { return } log.V(1).Info("certificate event", "event", event) - // If the file was removed, re-add the watch. - if isRemove(event) { + // If the file was removed or renamed, re-add the watch to the previous name + if isRemove(event) || isChmod(event) { if err := cw.watcher.Add(event.Name); err != nil { log.Error(err, "error re-watching file") } @@ -202,3 +202,7 @@ func isCreate(event fsnotify.Event) bool { func isRemove(event fsnotify.Event) bool { return event.Op.Has(fsnotify.Remove) } + +func isChmod(event fsnotify.Event) bool { + return event.Op.Has(fsnotify.Chmod) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index 6a1bfb546e..3c0206bea5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -31,11 +31,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" ) var ( @@ -60,25 +58,6 @@ func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { return addToScheme(protobufScheme) } -// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery -// information fetched by a new client with the given config. 
-func NewDiscoveryRESTMapper(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { - if httpClient == nil { - return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") - } - - // Get a mapper - dc, err := discovery.NewDiscoveryClientForConfigAndClient(c, httpClient) - if err != nil { - return nil, err - } - gr, err := restmapper.GetAPIGroupResources(dc) - if err != nil { - return nil, err - } - return restmapper.NewDiscoveryRESTMapper(gr), nil -} - // IsObjectNamespaced returns true if the object is namespace scoped. // For unstructured objects the gvk is found from the object itself. func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper meta.RESTMapper) (bool, error) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/errors.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/errors.go new file mode 100644 index 0000000000..c216c49d2a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/errors.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "fmt" + "sort" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ErrResourceDiscoveryFailed is returned if the RESTMapper cannot discover supported resources for some GroupVersions. +// It wraps the errors encountered, except "NotFound" errors are replaced with meta.NoResourceMatchError, for +// backwards compatibility with code that uses meta.IsNoMatchError() to check for unsupported APIs. +type ErrResourceDiscoveryFailed map[schema.GroupVersion]error + +// Error implements the error interface. 
+func (e *ErrResourceDiscoveryFailed) Error() string { + subErrors := []string{} + for k, v := range *e { + subErrors = append(subErrors, fmt.Sprintf("%s: %v", k, v)) + } + sort.Strings(subErrors) + return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(subErrors, ", ")) +} + +func (e *ErrResourceDiscoveryFailed) Unwrap() []error { + subErrors := []error{} + for gv, err := range *e { + if apierrors.IsNotFound(err) { + err = &meta.NoResourceMatchError{PartialResource: gv.WithResource("")} + } + subErrors = append(subErrors, err) + } + return subErrors +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go index e0ff72dc13..927be22b4e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -21,6 +21,7 @@ import ( "net/http" "sync" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -52,7 +53,7 @@ func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTM // client for discovery information to do REST mappings. type mapper struct { mapper meta.RESTMapper - client *discovery.DiscoveryClient + client discovery.DiscoveryInterface knownGroups map[string]*restmapper.APIGroupResources apiGroups map[string]*metav1.APIGroup @@ -166,8 +167,10 @@ func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) er if err != nil { return err } - for _, version := range apiGroup.Versions { - versions = append(versions, version.Version) + if apiGroup != nil { + for _, version := range apiGroup.Versions { + versions = append(versions, version.Version) + } } } @@ -179,23 +182,28 @@ func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) er Group: metav1.APIGroup{Name: groupName}, VersionedResources: make(map[string][]metav1.APIResource), } - if _, ok := m.knownGroups[groupName]; ok { - groupResources = m.knownGroups[groupName] - } // Update information for group resources about versioned resources. // The number of API calls is equal to the number of versions: /apis//. - groupVersionResources, err := m.fetchGroupVersionResources(groupName, versions...) + // If we encounter a missing API version (NotFound error), we will remove the group from + // the m.apiGroups and m.knownGroups caches. + // If this happens, in the next call the group will be added back to apiGroups + // and only the existing versions will be loaded in knownGroups. + groupVersionResources, err := m.fetchGroupVersionResourcesLocked(groupName, versions...) if err != nil { return fmt.Errorf("failed to get API group resources: %w", err) } - for version, resources := range groupVersionResources { - groupResources.VersionedResources[version.Version] = resources.APIResources + + if _, ok := m.knownGroups[groupName]; ok { + groupResources = m.knownGroups[groupName] } // Update information for group resources about the API group by adding new versions. // Ignore the versions that are already registered. 
- for _, version := range versions { + for groupVersion, resources := range groupVersionResources { + version := groupVersion.Version + + groupResources.VersionedResources[version] = resources.APIResources found := false for _, v := range groupResources.Group.Versions { if v.Version == version { @@ -254,21 +262,17 @@ func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) m.mu.Unlock() // Looking in the cache again. - { - m.mu.RLock() - group, ok := m.apiGroups[groupName] - m.mu.RUnlock() - if ok { - return group, nil - } - } + m.mu.RLock() + defer m.mu.RUnlock() - // If there is still nothing, return an error. - return nil, fmt.Errorf("failed to find API group %q", groupName) + // Don't return an error here if the API group is not present. + // The reloaded RESTMapper will take care of returning a NoMatchError. + return m.apiGroups[groupName], nil } -// fetchGroupVersionResources fetches the resources for the specified group and its versions. -func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { +// fetchGroupVersionResourcesLocked fetches the resources for the specified group and its versions. +// This method might modify the cache so it needs to be called under the lock. +func (m *mapper) fetchGroupVersionResourcesLocked(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) @@ -276,9 +280,20 @@ func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string groupVersion := schema.GroupVersion{Group: groupName, Version: version} apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String()) - if err != nil { + if apierrors.IsNotFound(err) { + // If the version is not found, we remove the group from the cache + // so it gets refreshed on the next call. + if m.isAPIGroupCached(groupVersion) { + delete(m.apiGroups, groupName) + } + if m.isGroupVersionCached(groupVersion) { + delete(m.knownGroups, groupName) + } + continue + } else if err != nil { failedGroups[groupVersion] = err } + if apiResourceList != nil { // even in case of error, some fallback might have been returned. groupVersionResources[groupVersion] = apiResourceList @@ -286,8 +301,35 @@ func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string } if len(failedGroups) > 0 { - return nil, &discovery.ErrGroupDiscoveryFailed{Groups: failedGroups} + err := ErrResourceDiscoveryFailed(failedGroups) + return nil, &err } return groupVersionResources, nil } + +// isGroupVersionCached checks if a version for a group is cached in the known groups cache. +func (m *mapper) isGroupVersionCached(gv schema.GroupVersion) bool { + if cachedGroup, ok := m.knownGroups[gv.Group]; ok { + _, cached := cachedGroup.VersionedResources[gv.Version] + return cached + } + + return false +} + +// isAPIGroupCached checks if a version for a group is cached in the api groups cache. 
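Because NotFound discovery responses are now surfaced as meta.NoResourceMatchError (wrapped in ErrResourceDiscoveryFailed above), callers can keep treating "this API is not installed" as an expected condition via meta.IsNoMatchError. A small sketch under that assumption; the GroupKind passed in is whatever optional API the caller probes for.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// hasAPI reports whether the mapper knows a REST mapping for the given
// GroupKind, treating "no match" as an expected condition rather than a failure.
func hasAPI(mapper meta.RESTMapper, gk schema.GroupKind, version string) (bool, error) {
	_, err := mapper.RESTMapping(gk, version)
	if meta.IsNoMatchError(err) {
		return false, nil
	}
	if err != nil {
		return false, fmt.Errorf("discovery failed: %w", err)
	}
	return true, nil
}
```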
+func (m *mapper) isAPIGroupCached(gv schema.GroupVersion) bool { + cachedGroup, ok := m.apiGroups[gv.Group] + if !ok { + return false + } + + for _, version := range cachedGroup.Versions { + if version.Version == gv.Version { + return true + } + } + + return false +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 0d8b9fbe18..c0ebb39e3d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -77,10 +77,12 @@ type CacheOptions struct { // Reader is a cache-backed reader that will be used to read objects from the cache. // +required Reader Reader - // DisableFor is a list of objects that should not be read from the cache. + // DisableFor is a list of objects that should never be read from the cache. + // Objects configured here always result in a live lookup. DisableFor []Object // Unstructured is a flag that indicates whether the cache-backed client should // read unstructured objects or lists from the cache. + // If false, unstructured objects will always result in a live lookup. Unstructured bool } @@ -88,11 +90,18 @@ type CacheOptions struct { type NewClientFunc func(config *rest.Config, options Options) (Client, error) // New returns a new Client using the provided config and Options. -// The returned client reads *and* writes directly from the server -// (it doesn't use object caches). It understands how to work with -// normal types (both custom resources and aggregated/built-in resources), -// as well as unstructured types. // +// The client's read behavior is determined by Options.Cache. +// If either Options.Cache or Options.Cache.Reader is nil, +// the client reads directly from the API server. +// If both Options.Cache and Options.Cache.Reader are non-nil, +// the client reads from a local cache. However, specific +// resources can still be configured to bypass the cache based +// on Options.Cache.Unstructured and Options.Cache.DisableFor. +// Write operations are always performed directly on the API server. +// +// The client understands how to work with normal types (both custom resources +// and aggregated/built-in resources), as well as unstructured types. // In the case of normal types, the scheme will be used to look up the // corresponding group, version, and kind for the given type. In the // case of unstructured types, the group, version, and kind will be extracted @@ -208,7 +217,8 @@ func newClient(config *rest.Config, options Options) (*client, error) { var _ Client = &client{} -// client is a client.Client that reads and writes directly from/to an API server. +// client is a client.Client configured to either read from a local cache or directly from the API server. +// Write operations are always performed directly on the API server. // It lazily initializes new clients at the time they are used. type client struct { typedClient typedClient @@ -342,9 +352,11 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...Get if isUncached, err := c.shouldBypassCache(obj); err != nil { return err } else if !isUncached { + // Attempt to get from the cache. return c.cache.Get(ctx, key, obj, opts...) } + // Perform a live lookup. switch obj.(type) { case runtime.Unstructured: return c.unstructuredClient.Get(ctx, key, obj, opts...) 
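The Get branch above decides per object type whether to serve from the cache or perform a live lookup. A sketch of how a caller opts specific types out of the cache when building a manager; the chosen type is only an example, and the cache-backed Reader is filled in by the manager itself.

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func newManagerWithLiveSecrets() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Client: client.Options{
			Cache: &client.CacheOptions{
				// Secrets are always fetched live from the API server;
				// other cached types are served from the cache.
				// (Unstructured reads also bypass the cache unless
				// CacheOptions.Unstructured is set to true.)
				DisableFor: []client.Object{&corev1.Secret{}},
			},
		},
	})
}
```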
@@ -362,9 +374,11 @@ func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) e if isUncached, err := c.shouldBypassCache(obj); err != nil { return err } else if !isUncached { + // Attempt to get from the cache. return c.cache.List(ctx, obj, opts...) } + // Perform a live lookup. switch x := obj.(type) { case runtime.Unstructured: return c.unstructuredClient.List(ctx, obj, opts...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go new file mode 100644 index 0000000000..2f2f892ef3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go @@ -0,0 +1,106 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// WithFieldOwner wraps a Client and adds the fieldOwner as the field +// manager to all write requests from this client. If additional [FieldOwner] +// options are specified on methods of this client, the value specified here +// will be overridden. +func WithFieldOwner(c Client, fieldOwner string) Client { + return &clientWithFieldManager{ + manager: fieldOwner, + c: c, + Reader: c, + } +} + +type clientWithFieldManager struct { + manager string + c Client + Reader +} + +func (f *clientWithFieldManager) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return f.c.Create(ctx, obj, append([]CreateOption{FieldOwner(f.manager)}, opts...)...) +} + +func (f *clientWithFieldManager) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return f.c.Update(ctx, obj, append([]UpdateOption{FieldOwner(f.manager)}, opts...)...) +} + +func (f *clientWithFieldManager) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return f.c.Patch(ctx, obj, patch, append([]PatchOption{FieldOwner(f.manager)}, opts...)...) +} + +func (f *clientWithFieldManager) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return f.c.Delete(ctx, obj, opts...) +} + +func (f *clientWithFieldManager) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return f.c.DeleteAllOf(ctx, obj, opts...) 
+} + +func (f *clientWithFieldManager) Scheme() *runtime.Scheme { return f.c.Scheme() } +func (f *clientWithFieldManager) RESTMapper() meta.RESTMapper { return f.c.RESTMapper() } +func (f *clientWithFieldManager) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return f.c.GroupVersionKindFor(obj) +} +func (f *clientWithFieldManager) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return f.c.IsObjectNamespaced(obj) +} + +func (f *clientWithFieldManager) Status() StatusWriter { + return &subresourceClientWithFieldOwner{ + owner: f.manager, + subresourceWriter: f.c.Status(), + } +} + +func (f *clientWithFieldManager) SubResource(subresource string) SubResourceClient { + c := f.c.SubResource(subresource) + return &subresourceClientWithFieldOwner{ + owner: f.manager, + subresourceWriter: c, + SubResourceReader: c, + } +} + +type subresourceClientWithFieldOwner struct { + owner string + subresourceWriter SubResourceWriter + SubResourceReader +} + +func (f *subresourceClientWithFieldOwner) Create(ctx context.Context, obj Object, subresource Object, opts ...SubResourceCreateOption) error { + return f.subresourceWriter.Create(ctx, obj, subresource, append([]SubResourceCreateOption{FieldOwner(f.owner)}, opts...)...) +} + +func (f *subresourceClientWithFieldOwner) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + return f.subresourceWriter.Update(ctx, obj, append([]SubResourceUpdateOption{FieldOwner(f.owner)}, opts...)...) +} + +func (f *subresourceClientWithFieldOwner) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + return f.subresourceWriter.Patch(ctx, obj, patch, append([]SubResourcePatchOption{FieldOwner(f.owner)}, opts...)...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index 0ddda3163d..3cd745e4c0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -142,6 +142,7 @@ type SubResourceWriter interface { // Create saves the subResource object in the Kubernetes cluster. obj must be a // struct pointer so that obj can be updated with the content returned by the Server. Create(ctx context.Context, obj Object, subResource Object, opts ...SubResourceCreateOption) error + // Update updates the fields corresponding to the status subresource for the // given obj. obj must be a struct pointer so that obj can be updated // with the content returned by the Server. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index d81bf25de9..798506f486 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -419,7 +419,7 @@ type ListOptions struct { LabelSelector labels.Selector // FieldSelector filters results by a particular field. In order // to use this with cache-based implementations, restrict usage to - // a single field-value pair that's been added to the indexers. + // exact match field-value pair that's been added to the indexers. FieldSelector fields.Selector // Namespace represents the namespace to list for, or empty for @@ -514,7 +514,8 @@ type MatchingLabels map[string]string func (m MatchingLabels) ApplyToList(opts *ListOptions) { // TODO(directxman12): can we avoid reserializing this over and over? 
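The new WithFieldOwner wrapper above pins a field manager on every write issued through the wrapped client, including status and other subresource writes. A short sketch of its use; the owner string and the object being created are arbitrary examples.

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func createWithOwner(ctx context.Context, c client.Client) error {
	// Every Create/Update/Patch issued through "owned" now carries
	// FieldOwner("example-operator") automatically.
	owned := client.WithFieldOwner(c, "example-operator")

	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Data:       map[string]string{"hello": "world"},
	}
	return owned.Create(ctx, cm)
}
```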
if opts.LabelSelector == nil { - opts.LabelSelector = labels.NewSelector() + opts.LabelSelector = labels.SelectorFromValidatedSet(map[string]string(m)) + return } // If there's already a selector, we need to AND the two together. noValidSel := labels.SelectorFromValidatedSet(map[string]string(m)) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go index 7ab76555b3..248893ea31 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go @@ -28,12 +28,11 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" ) @@ -95,17 +94,9 @@ type Options struct { // value only if you know what you are doing. Defaults to 10 hours if unset. // there will a 10 percent jitter between the SyncPeriod of all controllers // so that all controllers will not send list requests simultaneously. - SyncPeriod *time.Duration - - // Namespace if specified restricts the manager's cache to watch objects in - // the desired namespace Defaults to all namespaces // - // Note: If a namespace is specified, controllers can still Watch for a - // cluster-scoped resource (e.g Node). For namespaced resources the cache - // will only hold objects from the desired namespace. - // - // Deprecated: Use Cache.Namespaces instead. - Namespace string + // Deprecated: Use Cache.SyncPeriod instead. + SyncPeriod *time.Duration // HTTPClient is the http client that will be used to create the default // Cache and Client. If not set the rest.HTTPClientFor function will be used @@ -141,18 +132,6 @@ type Options struct { // Only use a custom NewClient if you know what you are doing. NewClient client.NewClientFunc - // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it - // for the given objects. - // - // Deprecated: Use Client.Cache.DisableFor instead. - ClientDisableCacheFor []client.Object - - // DryRunClient specifies whether the client should be configured to enforce - // dryRun mode. - // - // Deprecated: Use Client.DryRun instead. - DryRunClient bool - // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API // Use this to customize the event correlator and spam filter // @@ -218,9 +197,6 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { if cacheOpts.SyncPeriod == nil { cacheOpts.SyncPeriod = options.SyncPeriod } - if len(cacheOpts.Namespaces) == 0 && options.Namespace != "" { - cacheOpts.Namespaces = []string{options.Namespace} - } } cache, err := options.NewCache(config, cacheOpts) if err != nil { @@ -247,16 +223,6 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { if clientOpts.Cache.Reader == nil { clientOpts.Cache.Reader = cache } - - // For backward compatibility, the ClientDisableCacheFor option should - // be appended to the DisableFor option in the client. - clientOpts.Cache.DisableFor = append(clientOpts.Cache.DisableFor, options.ClientDisableCacheFor...) 
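With the backward-compatibility shims for ClientDisableCacheFor and DryRunClient removed from cluster.New, dry-run behaviour is requested directly on the client options. A minimal sketch, assuming a standalone client built from a rest.Config.

```go
package main

import (
	"k8s.io/client-go/rest"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newDryRunClient builds a client whose writes are sent with dryRun enforced,
// replacing the removed DryRunClient switch on the cluster/manager options.
func newDryRunClient(cfg *rest.Config) (client.Client, error) {
	return client.New(cfg, client.Options{
		DryRun: ptr.To(true),
	})
}
```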
- - if clientOpts.DryRun == nil && options.DryRunClient { - // For backward compatibility, the DryRunClient (if set) option should override - // the DryRun option in the client (if unset). - clientOpts.DryRun = pointer.Bool(true) - } } clientWriter, err := options.NewClient(config, clientOpts) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go index cdc7c334be..ff14c055da 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated // Code generated by controller-gen. DO NOT EDIT. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index 6732b6f709..e48db41f94 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -159,7 +159,9 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller return &controller.Controller{ Do: options.Reconciler, MakeQueue: func() workqueue.RateLimitingInterface { - return workqueue.NewNamedRateLimitingQueue(options.RateLimiter, name) + return workqueue.NewRateLimitingQueueWithConfig(options.RateLimiter, workqueue.RateLimitingQueueConfig{ + Name: name, + }) }, MaxConcurrentReconciles: options.MaxConcurrentReconciles, CacheSyncTimeout: options.CacheSyncTimeout, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go index 344abcd288..05153f74ce 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -27,7 +27,8 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) @@ -76,8 +77,8 @@ func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Sch Kind: gvk.Kind, Name: owner.GetName(), UID: owner.GetUID(), - BlockOwnerDeletion: pointer.Bool(true), - Controller: pointer.Bool(true), + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), } // Return early with an error if the object is already controlled. @@ -120,6 +121,84 @@ func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) erro return nil } +// RemoveOwnerReference is a helper method to make sure the given object removes an owner reference to the object provided. +// This allows you to remove the owner to establish a new owner of the object in a subsequent call. 
+func RemoveOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + owners := object.GetOwnerReferences() + length := len(owners) + if length < 1 { + return fmt.Errorf("%T does not have any owner references", object) + } + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call RemoveOwnerReference", owner) + } + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + + index := indexOwnerRef(owners, metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Name: owner.GetName(), + Kind: gvk.Kind, + }) + if index == -1 { + return fmt.Errorf("%T does not have an owner reference for %T", object, owner) + } + + owners = append(owners[:index], owners[index+1:]...) + object.SetOwnerReferences(owners) + return nil +} + +// HasControllerReference returns true if the object +// has an owner ref with controller equal to true +func HasControllerReference(object metav1.Object) bool { + owners := object.GetOwnerReferences() + for _, owner := range owners { + isTrue := owner.Controller + if owner.Controller != nil && *isTrue { + return true + } + } + return false +} + +// RemoveControllerReference removes an owner reference where the controller +// equals true +func RemoveControllerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + if ok := HasControllerReference(object); !ok { + return fmt.Errorf("%T does not have a owner reference with controller equals true", object) + } + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call RemoveControllerReference", owner) + } + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ownerRefs := object.GetOwnerReferences() + index := indexOwnerRef(ownerRefs, metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Name: owner.GetName(), + Kind: gvk.Kind, + }) + + if index == -1 { + return fmt.Errorf("%T does not have an controller reference for %T", object, owner) + } + + if ownerRefs[index].Controller == nil || !*ownerRefs[index].Controller { + return fmt.Errorf("%T owner is not the controller reference for %T", owner, object) + } + + ownerRefs = append(ownerRefs[:index], ownerRefs[index+1:]...) + object.SetOwnerReferences(ownerRefs) + return nil +} + func upsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) { owners := object.GetOwnerReferences() if idx := indexOwnerRef(owners, ref); idx == -1 { @@ -165,7 +244,6 @@ func referSameObject(a, b metav1.OwnerReference) bool { if err != nil { return false } - return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name } @@ -192,6 +270,9 @@ const ( // They should complete the sentence "Deployment default/foo has been .. // The MutateFn is called regardless of creating or updating an object. // // It returns the executed operation and an error. +// +// Note: changes made by MutateFn to any sub-resource (status...), will be +// discarded. func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { key := client.ObjectKeyFromObject(obj) if err := c.Get(ctx, key, obj); err != nil { @@ -229,6 +310,12 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f M // The MutateFn is called regardless of creating or updating an object. // // It returns the executed operation and an error. +// +// Note: changes to any sub-resource other than status will be ignored. 
+// Changes to the status sub-resource will only be applied if the object +// already exist. To change the status on object creation, the easiest +// way is to requeue the object in the controller if OperationResult is +// OperationResultCreated func CreateOrPatch(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { key := client.ObjectKeyFromObject(obj) if err := c.Get(ctx, key, obj); err != nil { @@ -365,15 +452,18 @@ func AddFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) { // It returns an indication of whether it updated the object's list of finalizers. func RemoveFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) { f := o.GetFinalizers() - for i := 0; i < len(f); i++ { + length := len(f) + + index := 0 + for i := 0; i < length; i++ { if f[i] == finalizer { - f = append(f[:i], f[i+1:]...) - i-- - finalizersUpdated = true + continue } + f[index] = f[i] + index++ } - o.SetFinalizers(f) - return + o.SetFinalizers(f[:index]) + return length != index } // ContainsFinalizer checks an Object that the provided finalizer is present. @@ -386,9 +476,3 @@ func ContainsFinalizer(o client.Object, finalizer string) bool { } return false } - -// Object allows functions to work indistinctly with any resource that -// implements both Object interfaces. -// -// Deprecated: Use client.Object instead. -type Object = client.Object diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go index 2f380f4fc4..ff2f3e80b2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -42,7 +42,7 @@ import ( // Unless you are implementing your own EventHandler, you can ignore the functions on the EventHandler interface. // Most users shouldn't need to implement their own EventHandler. type EventHandler interface { - // Create is called in response to an create event - e.g. Pod Creation. + // Create is called in response to a create event - e.g. Pod Creation. Create(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) // Update is called in response to an update event - e.g. Pod Updated. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index 83aba28cb7..33883647b9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -28,6 +28,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/handler" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -311,6 +312,7 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { // RunInformersAndControllers the syncHandler, passing it the Namespace/Name string of the // resource to be synced. 
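The RemoveFinalizer rewrite above keeps the same contract, reporting whether the finalizer slice actually changed, while compacting the slice in place. A small sketch of the usual add/remove pattern in a reconciler, assuming the object was fetched earlier and c is a client; the finalizer name is illustrative.

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const exampleFinalizer = "example.dev/cleanup" // illustrative name

func ensureFinalizer(ctx context.Context, c client.Client, obj *corev1.ConfigMap) error {
	if obj.GetDeletionTimestamp().IsZero() {
		// Object is live: make sure our finalizer is present.
		if controllerutil.AddFinalizer(obj, exampleFinalizer) {
			return c.Update(ctx, obj)
		}
		return nil
	}
	// Object is being deleted: run cleanup, then drop the finalizer.
	if controllerutil.RemoveFinalizer(obj, exampleFinalizer) {
		return c.Update(ctx, obj)
	}
	return nil
}
```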
+ log.V(5).Info("Reconciling") result, err := c.Reconcile(ctx, req) switch { case err != nil: @@ -321,8 +323,12 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { } ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc() ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc() + if !result.IsZero() { + log.Info("Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes reqeueuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler") + } log.Error(err, "Reconciler error") case result.RequeueAfter > 0: + log.V(5).Info(fmt.Sprintf("Reconcile done, requeueing after %s", result.RequeueAfter)) // The result.RequeueAfter request will be lost, if it is returned // along with a non-nil error. But this is intended as // We need to drive to stable reconcile loops before queuing due @@ -331,9 +337,11 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { c.Queue.AddAfter(req, result.RequeueAfter) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc() case result.Requeue: + log.V(5).Info("Reconcile done, requeueing") c.Queue.AddRateLimited(req) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc() default: + log.V(5).Info("Reconcile successful") // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. c.Queue.Forget(obj) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go index 4f6d084318..8f6dc71ede 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go @@ -22,14 +22,16 @@ import ( ) // RequiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`. -func RequiresExactMatch(sel fields.Selector) (field, val string, required bool) { +func RequiresExactMatch(sel fields.Selector) bool { reqs := sel.Requirements() - if len(reqs) != 1 { - return "", "", false + if len(reqs) == 0 { + return false } - req := reqs[0] - if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals { - return "", "", false + + for _, req := range reqs { + if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals { + return false + } } - return req.Field, req.Value, true + return true } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/syncs/syncs.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/syncs/syncs.go new file mode 100644 index 0000000000..c78a30377a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/syncs/syncs.go @@ -0,0 +1,38 @@ +package syncs + +import ( + "context" + "reflect" + "sync" +) + +// MergeChans returns a channel that is closed when any of the input channels are signaled. +// The caller must call the returned CancelFunc to ensure no resources are leaked. 
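The new warning above fires when a reconciler returns both a non-zero Result and a non-nil error, because the Result is discarded in that case. A sketch of the intended return shapes; the requeue interval and the doWork helper are placeholders.

```go
package main

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

type exampleReconciler struct{}

func (r *exampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	if err := doWork(ctx); err != nil {
		// Return the error alone: the workqueue requeues with exponential
		// backoff, and any Result returned alongside it would be ignored.
		return ctrl.Result{}, err
	}
	// Poll again later without treating it as a failure.
	return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
}

func doWork(ctx context.Context) error { return nil } // placeholder
```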
+func MergeChans[T any](chans ...<-chan T) (<-chan T, context.CancelFunc) { + var once sync.Once + out := make(chan T) + cancel := make(chan T) + cancelFunc := func() { + once.Do(func() { + close(cancel) + }) + <-out + } + cases := make([]reflect.SelectCase, len(chans)+1) + for i := range chans { + cases[i] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(chans[i]), + } + } + cases[len(cases)-1] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(cancel), + } + go func() { + defer close(out) + _, _, _ = reflect.Select(cases) + }() + + return out, cancelFunc +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go index c27b4305f8..6eb551d3b6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -188,6 +188,9 @@ func (l *delegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { // provided, instead of the temporary initial one, if this method // has not been previously called. func (l *delegatingLogSink) Fulfill(actual logr.LogSink) { + if actual == nil { + actual = NullLogSink{} + } if l.promise != nil { l.promise.Fulfill(actual) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go index a79151c69e..ade21d6fb5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -34,6 +34,7 @@ limitations under the License. package log import ( + "bytes" "context" "fmt" "os" @@ -56,7 +57,15 @@ func eventuallyFulfillRoot() { } if time.Since(rootLogCreated).Seconds() >= 30 { if logFullfilled.CompareAndSwap(false, true) { - fmt.Fprintf(os.Stderr, "[controller-runtime] log.SetLogger(...) was never called, logs will not be displayed:\n%s", debug.Stack()) + stack := debug.Stack() + stackLines := bytes.Count(stack, []byte{'\n'}) + sep := []byte{'\n', '\t', '>', ' ', ' '} + + fmt.Fprintf(os.Stderr, + "[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed.\nDetected at:%s%s", sep, + // prefix every line, so it's clear this is a stack trace related to the above message + bytes.Replace(stack, []byte{'\n'}, sep, stackLines-1), + ) SetLogger(logr.New(NullLogSink{})) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go index ee89a7c6a4..3a114667bd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go @@ -148,11 +148,6 @@ type Options struct { // DestWriter controls the destination of the log output. Defaults to // os.Stderr. DestWriter io.Writer - // DestWritter controls the destination of the log output. Defaults to - // os.Stderr. - // - // Deprecated: Use DestWriter instead - DestWritter io.Writer // Level configures the verbosity of the logging. // Defaults to Debug when Development is true and Info otherwise. // A zap log level should be multiplied by -1 to get the logr verbosity. @@ -174,11 +169,8 @@ type Options struct { // addDefaults adds defaults to the Options. 
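The reworked warning above is emitted when log.SetLogger is never called within 30 seconds, after which logging is silenced via the null sink. A sketch of the usual way to satisfy it early in main, assuming the zap helpers from this package are used.

```go
package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// Set the global logger before constructing the manager so
	// controller-runtime never falls back to the null logger.
	ctrl.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(os.Stderr)))

	// ... build and start the manager as usual ...
}
```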
func (o *Options) addDefaults() { - if o.DestWriter == nil && o.DestWritter == nil { + if o.DestWriter == nil { o.DestWriter = os.Stderr - } else if o.DestWriter == nil && o.DestWritter != nil { - // while misspelled DestWritter is deprecated but still not removed - o.DestWriter = o.DestWritter } if o.Development { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index f298229e57..2b03263de8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -28,7 +28,6 @@ import ( "time" "github.com/go-logr/logr" - "github.com/prometheus/client_golang/prometheus/promhttp" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" @@ -44,7 +43,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" - "sigs.k8s.io/controller-runtime/pkg/metrics" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -57,7 +56,6 @@ const ( defaultReadinessEndpoint = "/readyz" defaultLivenessEndpoint = "/healthz" - defaultMetricsEndpoint = "/metrics" ) var _ Runnable = &controllerManager{} @@ -84,11 +82,8 @@ type controllerManager struct { // on shutdown leaderElectionReleaseOnCancel bool - // metricsListener is used to serve prometheus metrics - metricsListener net.Listener - - // metricsExtraHandlers contains extra handlers to register on http server that serves metrics. - metricsExtraHandlers map[string]http.Handler + // metricsServer is used to serve prometheus metrics + metricsServer metricsserver.Server // healthProbeListener is used to serve liveness probe healthProbeListener net.Listener @@ -184,24 +179,20 @@ func (cm *controllerManager) add(r Runnable) error { return cm.runnables.Add(r) } -// AddMetricsExtraHandler adds extra handler served on path to the http server that serves metrics. -func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Handler) error { +// AddMetricsServerExtraHandler adds extra handler served on path to the http server that serves metrics. 
+func (cm *controllerManager) AddMetricsServerExtraHandler(path string, handler http.Handler) error { cm.Lock() defer cm.Unlock() - if cm.started { return fmt.Errorf("unable to add new metrics handler because metrics endpoint has already been created") } - - if path == defaultMetricsEndpoint { - return fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint) + if cm.metricsServer == nil { + cm.GetLogger().Info("warn: metrics server is currently disabled, registering extra handler %q will be ignored", path) + return nil } - - if _, found := cm.metricsExtraHandlers[path]; found { - return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path) + if err := cm.metricsServer.AddExtraHandler(path, handler); err != nil { + return err } - - cm.metricsExtraHandlers[path] = handler cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path) return nil } @@ -296,31 +287,10 @@ func (cm *controllerManager) GetControllerOptions() config.Controller { return cm.controllerConfig } -func (cm *controllerManager) addMetricsServer() error { +func (cm *controllerManager) addHealthProbeServer() error { mux := http.NewServeMux() srv := httpserver.New(mux) - handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ - ErrorHandling: promhttp.HTTPErrorOnError, - }) - // TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics - mux.Handle(defaultMetricsEndpoint, handler) - for path, extraHandler := range cm.metricsExtraHandlers { - mux.Handle(path, extraHandler) - } - - return cm.add(&server{ - Kind: "metrics", - Log: cm.logger.WithValues("path", defaultMetricsEndpoint), - Server: srv, - Listener: cm.metricsListener, - }) -} - -func (cm *controllerManager) serveHealthProbes() { - mux := http.NewServeMux() - server := httpserver.New(mux) - if cm.readyzHandler != nil { mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler)) // Append '/' suffix to handle subpaths @@ -332,7 +302,12 @@ func (cm *controllerManager) serveHealthProbes() { mux.Handle(cm.livenessEndpointName+"/", http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler)) } - go cm.httpServe("health probe", cm.logger, server, cm.healthProbeListener) + return cm.add(&server{ + Kind: "health probe", + Log: cm.logger, + Server: srv, + Listener: cm.healthProbeListener, + }) } func (cm *controllerManager) addPprofServer() error { @@ -353,42 +328,6 @@ func (cm *controllerManager) addPprofServer() error { }) } -func (cm *controllerManager) httpServe(kind string, log logr.Logger, server *http.Server, ln net.Listener) { - log = log.WithValues("kind", kind, "addr", ln.Addr()) - - go func() { - log.Info("Starting server") - if err := server.Serve(ln); err != nil { - if errors.Is(err, http.ErrServerClosed) { - return - } - if atomic.LoadInt64(cm.stopProcedureEngaged) > 0 { - // There might be cases where connections are still open and we try to shutdown - // but not having enough time to close the connection causes an error in Serve - // - // In that case we want to avoid returning an error to the main error channel. - log.Error(err, "error on Serve after stop has been engaged") - return - } - cm.errChan <- err - } - }() - - // Shutdown the server when stop is closed. - <-cm.internalProceduresStop - if err := server.Shutdown(cm.shutdownCtx); err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - // Avoid logging context related errors. 
- return - } - if atomic.LoadInt64(cm.stopProcedureEngaged) > 0 { - cm.logger.Error(err, "error on Shutdown after stop has been engaged") - return - } - cm.errChan <- err - } -} - // Start starts the manager and waits indefinitely. // There is only two ways to have start return: // An error has occurred during in one of the internal operations, @@ -441,15 +380,19 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // Metrics should be served whether the controller is leader or not. // (If we don't serve metrics for non-leaders, prometheus will still scrape // the pod but will get a connection refused). - if cm.metricsListener != nil { - if err := cm.addMetricsServer(); err != nil { + if cm.metricsServer != nil { + // Note: We are adding the metrics server directly to HTTPServers here as matching on the + // metricsserver.Server interface in cm.runnables.Add would be very brittle. + if err := cm.runnables.HTTPServers.Add(cm.metricsServer, nil); err != nil { return fmt.Errorf("failed to add metrics server: %w", err) } } // Serve health probes. if cm.healthProbeListener != nil { - cm.serveHealthProbes() + if err := cm.addHealthProbeServer(); err != nil { + return fmt.Errorf("failed to add health probe server: %w", err) + } } // Add pprof server @@ -459,7 +402,17 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { } } - // First start any webhook servers, which includes conversion, validation, and defaulting + // First start any internal HTTP servers, which includes health probes, metrics and profiling if enabled. + // + // WARNING: Internal HTTP servers MUST start before any cache is populated, otherwise it would block + // conversion webhooks to be ready for serving which make the cache never get ready. + if err := cm.runnables.HTTPServers.Start(cm.internalCtx); err != nil { + if err != nil { + return fmt.Errorf("failed to start HTTP servers: %w", err) + } + } + + // Start any webhook servers, which includes conversion, validation, and defaulting // webhooks that are registered. // // WARNING: Webhooks MUST start before any cache is populated, otherwise there is a race condition @@ -583,6 +536,8 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e // Stop all the leader election runnables, which includes reconcilers. cm.logger.Info("Stopping and waiting for leader election runnables") + // Prevent leader election when shutting down a non-elected manager + cm.runnables.LeaderElection.startOnce.Do(func() {}) cm.runnables.LeaderElection.StopAndWait(cm.shutdownCtx) // Stop the caches before the leader election runnables, this is an important @@ -591,10 +546,13 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e cm.logger.Info("Stopping and waiting for caches") cm.runnables.Caches.StopAndWait(cm.shutdownCtx) - // Webhooks should come last, as they might be still serving some requests. + // Webhooks and internal HTTP servers should come last, as they might be still serving some requests. cm.logger.Info("Stopping and waiting for webhooks") cm.runnables.Webhooks.StopAndWait(cm.shutdownCtx) + cm.logger.Info("Stopping and waiting for HTTP servers") + cm.runnables.HTTPServers.StopAndWait(cm.shutdownCtx) + // Proceed to close the manager and overall shutdown context. 
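The manager now builds a metricsserver.Server and runs it in the HTTPServers runnable group shown above instead of owning a raw listener. A sketch of configuring the bind address through the new Metrics options; the port is an example, and, as with the old MetricsBindAddress, "0" disables serving.

```go
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

func newManagerWithMetrics() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		// Replaces the removed MetricsBindAddress string option.
		Metrics: metricsserver.Options{
			BindAddress: ":8383", // example port
		},
	})
}
```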
cm.logger.Info("Wait completed, proceeding to shutdown the manager") shutdownCancel() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 72a4a7801a..0b7a865004 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -18,7 +18,6 @@ package manager import ( "context" - "crypto/tls" "errors" "fmt" "net" @@ -27,13 +26,16 @@ import ( "time" "github.com/go-logr/logr" + coordinationv1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" @@ -44,7 +46,6 @@ import ( intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" "sigs.k8s.io/controller-runtime/pkg/leaderelection" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/recorder" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -66,12 +67,14 @@ type Manager interface { // election was configured. Elected() <-chan struct{} - // AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics. - // Might be useful to register some diagnostic endpoints e.g. pprof. Note that these endpoints meant to be - // sensitive and shouldn't be exposed publicly. - // If the simple path -> handler mapping offered here is not enough, a new http server/listener should be added as - // Runnable to the manager via Add method. - AddMetricsExtraHandler(path string, handler http.Handler) error + // AddMetricsServerExtraHandler adds an extra handler served on path to the http server that serves metrics. + // Might be useful to register some diagnostic endpoints e.g. pprof. + // + // Note that these endpoints are meant to be sensitive and shouldn't be exposed publicly. + // + // If the simple path -> handler mapping offered here is not enough, + // a new http server/listener should be added as Runnable to the manager via Add method. + AddMetricsServerExtraHandler(path string, handler http.Handler) error // AddHealthzCheck allows you to add Healthz checker AddHealthzCheck(name string, check healthz.Checker) error @@ -141,35 +144,6 @@ type Options struct { // Only use a custom NewClient if you know what you are doing. NewClient client.NewClientFunc - // SyncPeriod determines the minimum frequency at which watched resources are - // reconciled. A lower period will correct entropy more quickly, but reduce - // responsiveness to change if there are many watched resources. Change this - // value only if you know what you are doing. Defaults to 10 hours if unset. - // there will a 10 percent jitter between the SyncPeriod of all controllers - // so that all controllers will not send list requests simultaneously. - // - // This applies to all controllers. - // - // A period sync happens for two reasons: - // 1. To insure against a bug in the controller that causes an object to not - // be requeued, when it otherwise should be requeued. - // 2. 
To insure against an unknown bug in controller-runtime, or its dependencies, - // that causes an object to not be requeued, when it otherwise should be - // requeued, or to be removed from the queue, when it otherwise should not - // be removed. - // - // If you want - // 1. to insure against missed watch events, or - // 2. to poll services that cannot be watched, - // then we recommend that, instead of changing the default period, the - // controller requeue, with a constant duration `t`, whenever the controller - // is "done" with an object, and would otherwise not requeue it, i.e., we - // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, - // instead of `reconcile.Result{}`. - // - // Deprecated: Use Cache.SyncPeriod instead. - SyncPeriod *time.Duration - // Logger is the logger that should be used by this manager. // If none is set, it defaults to log.Log global logger. Logger logr.Logger @@ -240,27 +214,17 @@ type Options struct { // wait to force acquire leadership. This is measured against time of // last observed ack. Default is 15 seconds. LeaseDuration *time.Duration + // RenewDeadline is the duration that the acting controlplane will retry // refreshing leadership before giving up. Default is 10 seconds. RenewDeadline *time.Duration + // RetryPeriod is the duration the LeaderElector clients should wait // between tries of actions. Default is 2 seconds. RetryPeriod *time.Duration - // Namespace, if specified, restricts the manager's cache to watch objects in - // the desired namespace. Defaults to all namespaces. - // - // Note: If a namespace is specified, controllers can still Watch for a - // cluster-scoped resource (e.g Node). For namespaced resources, the cache - // will only hold objects from the desired namespace. - // - // Deprecated: Use Cache.Namespaces instead. - Namespace string - - // MetricsBindAddress is the TCP address that the controller should bind to - // for serving prometheus metrics. - // It can be set to "0" to disable the metrics serving. - MetricsBindAddress string + // Metrics are the metricsserver.Options that will be used to create the metricsserver.Server. + Metrics metricsserver.Options // HealthProbeBindAddress is the TCP address that the controller should bind to // for serving health probes @@ -280,34 +244,9 @@ type Options struct { // before exposing it to public. PprofBindAddress string - // Port is the port that the webhook server serves at. - // It is used to set webhook.Server.Port if WebhookServer is not set. - // - // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. - Port int - // Host is the hostname that the webhook server binds to. - // It is used to set webhook.Server.Host if WebhookServer is not set. - // - // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. - Host string - - // CertDir is the directory that contains the server key and certificate. - // If not set, webhook server would look up the server key and certificate in - // {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate - // must be named tls.key and tls.crt, respectively. - // It is used to set webhook.Server.CertDir if WebhookServer is not set. - // - // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. - CertDir string - - // TLSOpts is used to allow configuring the TLS config used for the webhook server. - // - // Deprecated: Use WebhookServer instead. 
A WebhookServer can be created via webhook.NewServer. - TLSOpts []func(*tls.Config) - // WebhookServer is an externally configured webhook.Server. By default, - // a Manager will create a default server using Port, Host, and CertDir; - // if this is set, the Manager will use this server instead. + // a Manager will create a server via webhook.NewServer with default settings. + // If this is set, the Manager will use this server instead. WebhookServer webhook.Server // BaseContext is the function that provides Context values to Runnables @@ -315,18 +254,6 @@ type Options struct { // will receive a new Background Context instead. BaseContext BaseContextFunc - // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it - // for the given objects. - // - // Deprecated: Use Client.Cache.DisableCacheFor instead. - ClientDisableCacheFor []client.Object - - // DryRunClient specifies whether the client should be configured to enforce - // dryRun mode. - // - // Deprecated: Use Client.DryRun instead. - DryRunClient bool - // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API // Use this to customize the event correlator and spam filter // @@ -354,7 +281,7 @@ type Options struct { // Dependency injection for testing newRecorderProvider func(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) - newMetricsListener func(addr string) (net.Listener, error) + newMetricsServer func(options metricsserver.Options, config *rest.Config, httpClient *http.Client) (metricsserver.Server, error) newHealthProbeListener func(addr string) (net.Listener, error) newPprofListener func(addr string) (net.Listener, error) } @@ -391,6 +318,9 @@ type LeaderElectionRunnable interface { } // New returns a new Manager for creating Controllers. +// Note that if ContentType in the given config is not set, "application/vnd.kubernetes.protobuf" +// will be used for all built-in resources of Kubernetes, and "application/json" is for other types +// including all CRD resources. 
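With Port, Host, CertDir, and TLSOpts removed from the manager options, webhook serving is configured by passing a preconstructed webhook.Server, as the deprecation notes above suggest. A sketch using webhook.NewServer; the port and certificate directory are placeholders.

```go
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func newManagerWithWebhooks() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		// Replaces the removed Port/Host/CertDir manager options.
		WebhookServer: webhook.NewServer(webhook.Options{
			Port:    9443,                                     // placeholder port
			CertDir: "/tmp/k8s-webhook-server/serving-certs", // placeholder path
		}),
	})
}
```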
func New(config *rest.Config, options Options) (Manager, error) { if config == nil { return nil, errors.New("must specify Config") @@ -402,15 +332,11 @@ func New(config *rest.Config, options Options) (Manager, error) { clusterOptions.Scheme = options.Scheme clusterOptions.MapperProvider = options.MapperProvider clusterOptions.Logger = options.Logger - clusterOptions.SyncPeriod = options.SyncPeriod clusterOptions.NewCache = options.NewCache clusterOptions.NewClient = options.NewClient clusterOptions.Cache = options.Cache clusterOptions.Client = options.Client - clusterOptions.Namespace = options.Namespace //nolint:staticcheck - clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor //nolint:staticcheck - clusterOptions.DryRunClient = options.DryRunClient //nolint:staticcheck - clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck + clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck }) if err != nil { return nil, err @@ -438,7 +364,20 @@ func New(config *rest.Config, options Options) (Manager, error) { leaderRecorderProvider = recorderProvider } else { leaderConfig = rest.CopyConfig(options.LeaderElectionConfig) - leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetHTTPClient(), cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + scheme := cluster.GetScheme() + err := corev1.AddToScheme(scheme) + if err != nil { + return nil, err + } + err = coordinationv1.AddToScheme(scheme) + if err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(options.LeaderElectionConfig) + if err != nil { + return nil, err + } + leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, httpClient, scheme, options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } @@ -459,16 +398,12 @@ func New(config *rest.Config, options Options) (Manager, error) { } } - // Create the metrics listener. This will throw an error if the metrics bind - // address is invalid or already in use. - metricsListener, err := options.newMetricsListener(options.MetricsBindAddress) + // Create the metrics server. + metricsServer, err := options.newMetricsServer(options.Metrics, config, cluster.GetHTTPClient()) if err != nil { return nil, err } - // By default we have no extra endpoints to expose on metrics http server. - metricsExtraHandlers := make(map[string]http.Handler) - // Create health probes listener. This will throw an error if the bind // address is invalid or already in use. 
healthProbeListener, err := options.newHealthProbeListener(options.HealthProbeBindAddress) @@ -483,18 +418,16 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, fmt.Errorf("failed to new pprof listener: %w", err) } - errChan := make(chan error) + errChan := make(chan error, 1) runnables := newRunnables(options.BaseContext, errChan) - return &controllerManager{ - stopProcedureEngaged: pointer.Int64(0), + stopProcedureEngaged: ptr.To(int64(0)), cluster: cluster, runnables: runnables, errChan: errChan, recorderProvider: recorderProvider, resourceLock: resourceLock, - metricsListener: metricsListener, - metricsExtraHandlers: metricsExtraHandlers, + metricsServer: metricsServer, controllerConfig: options.Controller, logger: options.Logger, elected: make(chan struct{}), @@ -532,16 +465,16 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, o = o.setLeaderElectionConfig(newObj) - if o.SyncPeriod == nil && newObj.SyncPeriod != nil { - o.SyncPeriod = &newObj.SyncPeriod.Duration + if o.Cache.SyncPeriod == nil && newObj.SyncPeriod != nil { + o.Cache.SyncPeriod = &newObj.SyncPeriod.Duration } - if o.Namespace == "" && newObj.CacheNamespace != "" { - o.Namespace = newObj.CacheNamespace + if len(o.Cache.DefaultNamespaces) == 0 && newObj.CacheNamespace != "" { + o.Cache.DefaultNamespaces = map[string]cache.Config{newObj.CacheNamespace: {}} } - if o.MetricsBindAddress == "" && newObj.Metrics.BindAddress != "" { - o.MetricsBindAddress = newObj.Metrics.BindAddress + if o.Metrics.BindAddress == "" && newObj.Metrics.BindAddress != "" { + o.Metrics.BindAddress = newObj.Metrics.BindAddress } if o.HealthProbeBindAddress == "" && newObj.Health.HealthProbeBindAddress != "" { @@ -556,20 +489,15 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, o.LivenessEndpointName = newObj.Health.LivenessEndpointName } - if o.Port == 0 && newObj.Webhook.Port != nil { - o.Port = *newObj.Webhook.Port - } - if o.Host == "" && newObj.Webhook.Host != "" { - o.Host = newObj.Webhook.Host - } - if o.CertDir == "" && newObj.Webhook.CertDir != "" { - o.CertDir = newObj.Webhook.CertDir - } if o.WebhookServer == nil { + port := 0 + if newObj.Webhook.Port != nil { + port = *newObj.Webhook.Port + } o.WebhookServer = webhook.NewServer(webhook.Options{ - Port: o.Port, - Host: o.Host, - CertDir: o.CertDir, + Port: port, + Host: newObj.Webhook.Host, + CertDir: newObj.Webhook.CertDir, }) } @@ -697,8 +625,8 @@ func setOptionsDefaults(options Options) Options { } } - if options.newMetricsListener == nil { - options.newMetricsListener = metrics.NewListener + if options.newMetricsServer == nil { + options.newMetricsServer = metricsserver.NewServer } leaseDuration, renewDeadline, retryPeriod := defaultLeaseDuration, defaultRenewDeadline, defaultRetryPeriod if options.LeaseDuration == nil { @@ -743,12 +671,7 @@ func setOptionsDefaults(options Options) Options { } if options.WebhookServer == nil { - options.WebhookServer = webhook.NewServer(webhook.Options{ - Host: options.Host, - Port: options.Port, - CertDir: options.CertDir, - TLSOpts: options.TLSOpts, - }) + options.WebhookServer = webhook.NewServer(webhook.Options{}) } return options diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go index 549741e6e5..6060910485 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go +++ 
b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go @@ -28,6 +28,7 @@ type runnableCheck func(ctx context.Context) bool // runnables handles all the runnables for a manager by grouping them accordingly to their // type (webhooks, caches etc.). type runnables struct { + HTTPServers *runnableGroup Webhooks *runnableGroup Caches *runnableGroup LeaderElection *runnableGroup @@ -37,6 +38,7 @@ type runnables struct { // newRunnables creates a new runnables object. func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables { return &runnables{ + HTTPServers: newRunnableGroup(baseContext, errChan), Webhooks: newRunnableGroup(baseContext, errChan), Caches: newRunnableGroup(baseContext, errChan), LeaderElection: newRunnableGroup(baseContext, errChan), @@ -52,6 +54,8 @@ func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables { // The runnables added after Start are started directly. func (r *runnables) Add(fn Runnable) error { switch runnable := fn.(type) { + case *server: + return r.HTTPServers.Add(fn, nil) case hasCache: return r.Caches.Add(fn, func(ctx context.Context) bool { return runnable.GetCache().WaitForCacheSync(ctx) @@ -259,6 +263,15 @@ func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error { r.start.Unlock() } + // Recheck if we're stopped and hold the readlock, given that the stop and start can be called + // at the same time, we can end up in a situation where the runnable is added + // after the group is stopped and the channel is closed. + r.stop.RLock() + defer r.stop.RUnlock() + if r.stopped { + return errRunnableGroupStopped + } + // Enqueue the runnable. r.ch <- readyRunnable return nil @@ -268,7 +281,11 @@ func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error { func (r *runnableGroup) StopAndWait(ctx context.Context) { r.stopOnce.Do(func() { // Close the reconciler channel once we're done. - defer close(r.ch) + defer func() { + r.stop.Lock() + close(r.ch) + r.stop.Unlock() + }() _ = r.Start(ctx) r.stop.Lock() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go deleted file mode 100644 index 123d8c15f9..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -import ( - "fmt" - "net" - - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" -) - -var log = logf.RuntimeLog.WithName("metrics") - -// DefaultBindAddress sets the default bind address for the metrics listener -// The metrics is on by default. -var DefaultBindAddress = ":8080" - -// NewListener creates a new TCP listener bound to the given address. 
-func NewListener(addr string) (net.Listener, error) { - if addr == "" { - // If the metrics bind address is empty, default to ":8080" - addr = DefaultBindAddress - } - - // Add a case to disable metrics altogether - if addr == "0" { - return nil, nil - } - - log.Info("Metrics server is starting to listen", "addr", addr) - ln, err := net.Listen("tcp", addr) - if err != nil { - er := fmt.Errorf("error listening on %s: %w", addr, err) - log.Error(er, "metrics server failed to listen. You may want to disable the metrics server or use another port if it is due to conflicts") - return nil, er - } - return ln, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/doc.go new file mode 100644 index 0000000000..4c42f6eed7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package server provides the metrics server implementation. +*/ +package server + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("metrics") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/server.go new file mode 100644 index 0000000000..40eb9db8cc --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/server.go @@ -0,0 +1,332 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "sync" + "time" + + "github.com/go-logr/logr" + "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/client-go/rest" + certutil "k8s.io/client-go/util/cert" + + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +const ( + defaultMetricsEndpoint = "/metrics" +) + +// DefaultBindAddress is the default bind address for the metrics server. +var DefaultBindAddress = ":8080" + +// Server is a server that serves metrics. +type Server interface { + // AddExtraHandler adds extra handler served on path to the http server that serves metrics. 
+ AddExtraHandler(path string, handler http.Handler) error + + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates + // the metrics server doesn't need leader election. + NeedLeaderElection() bool + + // Start runs the server. + // It will install the metrics related resources depending on the server configuration. + Start(ctx context.Context) error +} + +// Options are all available options for the metrics.Server +type Options struct { + // SecureServing enables serving metrics via https. + // Per default metrics will be served via http. + SecureServing bool + + // BindAddress is the bind address for the metrics server. + // It will be defaulted to ":8080" if unspecified. + // Set this to "0" to disable the metrics server. + BindAddress string + + // ExtraHandlers contains a map of handlers (by path) which will be added to the metrics server. + // This might be useful to register diagnostic endpoints e.g. pprof. + // Note that pprof endpoints are meant to be sensitive and shouldn't be exposed publicly. + // If the simple path -> handler mapping offered here is not enough, a new http + // server/listener should be added as Runnable to the manager via the Add method. + ExtraHandlers map[string]http.Handler + + // FilterProvider provides a filter which is a func that is added around + // the metrics and the extra handlers on the metrics server. + // This can be e.g. used to enforce authentication and authorization on the handlers + // endpoint by setting this field to filters.WithAuthenticationAndAuthorization. + FilterProvider func(c *rest.Config, httpClient *http.Client) (Filter, error) + + // CertDir is the directory that contains the server key and certificate. Defaults to + // /k8s-metrics-server/serving-certs. + // + // Note: This option is only used when TLSOpts does not set GetCertificate. + // Note: If certificate or key doesn't exist a self-signed certificate will be used. + CertDir string + + // CertName is the server certificate name. Defaults to tls.crt. + // + // Note: This option is only used when TLSOpts does not set GetCertificate. + // Note: If certificate or key doesn't exist a self-signed certificate will be used. + CertName string + + // KeyName is the server key name. Defaults to tls.key. + // + // Note: This option is only used when TLSOpts does not set GetCertificate. + // Note: If certificate or key doesn't exist a self-signed certificate will be used. + KeyName string + + // TLSOpts is used to allow configuring the TLS config used for the server. + // This also allows providing a certificate via GetCertificate. + TLSOpts []func(*tls.Config) +} + +// Filter is a func that is added around metrics and extra handlers on the metrics server. +type Filter func(log logr.Logger, handler http.Handler) (http.Handler, error) + +// NewServer constructs a new metrics.Server from the provided options. +func NewServer(o Options, config *rest.Config, httpClient *http.Client) (Server, error) { + o.setDefaults() + + // Skip server creation if metrics are disabled. + if o.BindAddress == "0" { + return nil, nil + } + + // Validate that ExtraHandlers is not overwriting the default /metrics endpoint. + if o.ExtraHandlers != nil { + if _, ok := o.ExtraHandlers[defaultMetricsEndpoint]; ok { + return nil, fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint) + } + } + + // Create the metrics filter if a FilterProvider is set. 
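A hedged sketch of how these metrics Options might be filled in by a consumer, combining SecureServing with the authn/authz filter mentioned in the FilterProvider comment. The filters import path is an assumption here, it is not part of this diff.

package example

import (
	metricsfilters "sigs.k8s.io/controller-runtime/pkg/metrics/filters"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

// secureMetricsOptions serves metrics over HTTPS and wraps the handlers
// with Kubernetes authentication and authorization checks.
func secureMetricsOptions() metricsserver.Options {
	return metricsserver.Options{
		BindAddress:   ":8443",
		SecureServing: true,
		// FilterProvider wraps /metrics and any extra handlers before serving.
		FilterProvider: metricsfilters.WithAuthenticationAndAuthorization,
	}
}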
+ var metricsFilter Filter + if o.FilterProvider != nil { + var err error + metricsFilter, err = o.FilterProvider(config, httpClient) + if err != nil { + return nil, fmt.Errorf("filter provider failed to create filter for the metrics server: %w", err) + } + } + + return &defaultServer{ + metricsFilter: metricsFilter, + options: o, + }, nil +} + +// defaultServer is the default implementation used for Server. +type defaultServer struct { + options Options + + // metricsFilter is a filter which is added around + // the metrics and the extra handlers on the metrics server. + metricsFilter Filter + + // mu protects access to the bindAddr field. + mu sync.RWMutex + + // bindAddr is used to store the bindAddr after the listener has been created. + // This is used during testing to figure out the port that has been chosen randomly. + bindAddr string +} + +// setDefaults does defaulting for the Server. +func (o *Options) setDefaults() { + if o.BindAddress == "" { + o.BindAddress = DefaultBindAddress + } + + if len(o.CertDir) == 0 { + o.CertDir = filepath.Join(os.TempDir(), "k8s-metrics-server", "serving-certs") + } + + if len(o.CertName) == 0 { + o.CertName = "tls.crt" + } + + if len(o.KeyName) == 0 { + o.KeyName = "tls.key" + } +} + +// NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates +// the metrics server doesn't need leader election. +func (*defaultServer) NeedLeaderElection() bool { + return false +} + +// AddExtraHandler adds extra handler served on path to the http server that serves metrics. +func (s *defaultServer) AddExtraHandler(path string, handler http.Handler) error { + s.mu.Lock() + defer s.mu.Unlock() + if s.options.ExtraHandlers == nil { + s.options.ExtraHandlers = make(map[string]http.Handler) + } + if path == defaultMetricsEndpoint { + return fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint) + } + if _, found := s.options.ExtraHandlers[path]; found { + return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path) + } + s.options.ExtraHandlers[path] = handler + return nil +} + +// Start runs the server. +// It will install the metrics related resources depend on the server configuration. +func (s *defaultServer) Start(ctx context.Context) error { + log.Info("Starting metrics server") + + listener, err := s.createListener(ctx, log) + if err != nil { + return fmt.Errorf("failed to start metrics server: failed to create listener: %w", err) + } + // Storing bindAddr here so we can retrieve it during testing via GetBindAddr. 
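Following on from the ExtraHandlers handling above, a small sketch of exposing pprof endpoints through the metrics server. The paths are illustrative, and as the Options comment notes, pprof endpoints should not be exposed publicly.

package example

import (
	"net/http"
	"net/http/pprof"

	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

// metricsWithPprof registers pprof handlers next to /metrics; registering
// "/metrics" itself would be rejected by NewServer and AddExtraHandler.
func metricsWithPprof() metricsserver.Options {
	return metricsserver.Options{
		BindAddress: ":8080",
		ExtraHandlers: map[string]http.Handler{
			"/debug/pprof/":        http.HandlerFunc(pprof.Index),
			"/debug/pprof/profile": http.HandlerFunc(pprof.Profile),
		},
	}
}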
+ s.mu.Lock() + s.bindAddr = listener.Addr().String() + s.mu.Unlock() + + mux := http.NewServeMux() + + handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ + ErrorHandling: promhttp.HTTPErrorOnError, + }) + if s.metricsFilter != nil { + log := log.WithValues("path", defaultMetricsEndpoint) + var err error + handler, err = s.metricsFilter(log, handler) + if err != nil { + return fmt.Errorf("failed to start metrics server: failed to add metrics filter: %w", err) + } + } + // TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics + mux.Handle(defaultMetricsEndpoint, handler) + + for path, extraHandler := range s.options.ExtraHandlers { + if s.metricsFilter != nil { + log := log.WithValues("path", path) + var err error + extraHandler, err = s.metricsFilter(log, extraHandler) + if err != nil { + return fmt.Errorf("failed to start metrics server: failed to add metrics filter to extra handler for path %s: %w", path, err) + } + } + mux.Handle(path, extraHandler) + } + + log.Info("Serving metrics server", "bindAddress", s.options.BindAddress, "secure", s.options.SecureServing) + + srv := httpserver.New(mux) + + idleConnsClosed := make(chan struct{}) + go func() { + <-ctx.Done() + log.Info("Shutting down metrics server with timeout of 1 minute") + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + if err := srv.Shutdown(ctx); err != nil { + // Error from closing listeners, or context timeout + log.Error(err, "error shutting down the HTTP server") + } + close(idleConnsClosed) + }() + + if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed { + return err + } + + <-idleConnsClosed + return nil +} + +func (s *defaultServer) createListener(ctx context.Context, log logr.Logger) (net.Listener, error) { + if !s.options.SecureServing { + return net.Listen("tcp", s.options.BindAddress) + } + + cfg := &tls.Config{ //nolint:gosec + NextProtos: []string{"h2"}, + } + // fallback TLS config ready, will now mutate if passer wants full control over it + for _, op := range s.options.TLSOpts { + op(cfg) + } + + if cfg.GetCertificate == nil { + certPath := filepath.Join(s.options.CertDir, s.options.CertName) + keyPath := filepath.Join(s.options.CertDir, s.options.KeyName) + + _, certErr := os.Stat(certPath) + certExists := !os.IsNotExist(certErr) + _, keyErr := os.Stat(keyPath) + keyExists := !os.IsNotExist(keyErr) + if certExists && keyExists { + // Create the certificate watcher and + // set the config's GetCertificate on the TLSConfig + certWatcher, err := certwatcher.New(certPath, keyPath) + if err != nil { + return nil, err + } + cfg.GetCertificate = certWatcher.GetCertificate + + go func() { + if err := certWatcher.Start(ctx); err != nil { + log.Error(err, "certificate watcher error") + } + }() + } + } + + // If cfg.GetCertificate is still nil, i.e. we didn't configure a cert watcher, fallback to a self-signed certificate. + if cfg.GetCertificate == nil { + // Note: Using self-signed certificates here should be good enough. It's just important that we + // encrypt the communication. For example kube-controller-manager also uses a self-signed certificate + // for the metrics endpoint per default. 
+ cert, key, err := certutil.GenerateSelfSignedCertKeyWithFixtures("localhost", []net.IP{{127, 0, 0, 1}}, nil, "") + if err != nil { + return nil, fmt.Errorf("failed to generate self-signed certificate for metrics server: %w", err) + } + + keyPair, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, fmt.Errorf("failed to create self-signed key pair for metrics server: %w", err) + } + cfg.Certificates = []tls.Certificate{keyPair} + } + + return tls.Listen("tcp", s.options.BindAddress, cfg) +} + +func (s *defaultServer) GetBindAddr() string { + s.mu.RLock() + defer s.mu.RUnlock() + return s.bindAddr +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go index 277b878810..cff1de4c1c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go @@ -54,14 +54,14 @@ var ( Subsystem: WorkQueueSubsystem, Name: QueueLatencyKey, Help: "How long in seconds an item stays in workqueue before being requested", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12), }, []string{"name"}) workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Subsystem: WorkQueueSubsystem, Name: WorkDurationKey, Help: "How long in seconds processing an item from workqueue takes.", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12), }, []string{"name"}) unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{ diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go index 4c8f8357a5..f1cce87c85 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -19,9 +19,11 @@ package reconcile import ( "context" "errors" + "reflect" "time" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) // Result contains the result of a Reconciler invocation. @@ -89,8 +91,16 @@ instead the reconcile function observes this when reading the cluster state and */ type Reconciler interface { // Reconcile performs a full reconciliation for the object referred to by the Request. - // The Controller will requeue the Request to be processed again if an error is non-nil or - // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. + // + // If the returned error is non-nil, the Result is ignored and the request will be + // requeued using exponential backoff. The only exception is if the error is a + // TerminalError in which case no requeuing happens. + // + // If the error is nil and the returned Result has a non-zero result.RequeueAfter, the request + // will be requeued after the specified duration. + // + // If the error is nil and result.RequeueAfter is zero and result.Requeue is true, the request + // will be requeued using exponential backoff. Reconcile(context.Context, Request) (Result, error) } @@ -102,6 +112,36 @@ var _ Reconciler = Func(nil) // Reconcile implements Reconciler. func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) } +// ObjectReconciler is a specialized version of Reconciler that acts on instances of client.Object. Each reconciliation +// event gets the associated object from Kubernetes before passing it to Reconcile. 
An ObjectReconciler can be used in +// Builder.Complete by calling AsReconciler. See Reconciler for more details. +type ObjectReconciler[T client.Object] interface { + Reconcile(context.Context, T) (Result, error) +} + +// AsReconciler creates a Reconciler based on the given ObjectReconciler. +func AsReconciler[T client.Object](client client.Client, rec ObjectReconciler[T]) Reconciler { + return &objectReconcilerAdapter[T]{ + objReconciler: rec, + client: client, + } +} + +type objectReconcilerAdapter[T client.Object] struct { + objReconciler ObjectReconciler[T] + client client.Client +} + +// Reconcile implements Reconciler. +func (a *objectReconcilerAdapter[T]) Reconcile(ctx context.Context, req Request) (Result, error) { + o := reflect.New(reflect.TypeOf(*new(T)).Elem()).Interface().(T) + if err := a.client.Get(ctx, req.NamespacedName, o); err != nil { + return Result{}, client.IgnoreNotFound(err) + } + + return a.objReconciler.Reconcile(ctx, o) +} + // TerminalError is an error that will not be retried but still be logged // and recorded in metrics. func TerminalError(wrapped error) error { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go index a3b7207168..c9662ce1c0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go @@ -27,12 +27,14 @@ import ( ) // Defaulter defines functions for setting defaults on resources. +// Deprecated: Ue CustomDefaulter instead. type Defaulter interface { runtime.Object Default() } // DefaultingWebhookFor creates a new Webhook for Defaulting the provided type. +// Deprecated: Use WithCustomDefaulter instead. func DefaultingWebhookFor(scheme *runtime.Scheme, defaulter Defaulter) *Webhook { return &Webhook{ Handler: &mutatingHandler{defaulter: defaulter, decoder: NewDecoder(scheme)}, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go index 84ab5e75a4..f049fb66e6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -34,6 +34,26 @@ import ( var admissionScheme = runtime.NewScheme() var admissionCodecs = serializer.NewCodecFactory(admissionScheme) +// adapted from https://github.com/kubernetes/kubernetes/blob/c28c2009181fcc44c5f6b47e10e62dacf53e4da0/staging/src/k8s.io/pod-security-admission/cmd/webhook/server/server.go +// +// From https://github.com/kubernetes/apiserver/blob/d6876a0600de06fef75968c4641c64d7da499f25/pkg/server/config.go#L433-L442C5: +// +// 1.5MB is the recommended client request size in byte +// the etcd server should accept. See +// https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56. +// A request body might be encoded in json, and is converted to +// proto when persisted in etcd, so we allow 2x as the largest request +// body size to be accepted and decoded in a write request. +// +// For the admission request, we can infer that it contains at most two objects +// (the old and new versions of the object being admitted), each of which can +// be at most 3MB in size. For the rest of the request, we can assume that +// it will be less than 1MB in size. Therefore, we can set the max request +// size to 7MB. 
+// If your use case requires larger max request sizes, please +// open an issue (https://github.com/kubernetes-sigs/controller-runtime/issues/new). +const maxRequestSize = int64(7 * 1024 * 1024) + func init() { utilruntime.Must(v1.AddToScheme(admissionScheme)) utilruntime.Must(v1beta1.AddToScheme(admissionScheme)) @@ -42,27 +62,30 @@ func init() { var _ http.Handler = &Webhook{} func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var body []byte - var err error ctx := r.Context() if wh.WithContextFunc != nil { ctx = wh.WithContextFunc(ctx, r) } - var reviewResponse Response - if r.Body == nil { - err = errors.New("request body is empty") + if r.Body == nil || r.Body == http.NoBody { + err := errors.New("request body is empty") wh.getLogger(nil).Error(err, "bad request") - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) return } defer r.Body.Close() - if body, err = io.ReadAll(r.Body); err != nil { + limitedReader := &io.LimitedReader{R: r.Body, N: maxRequestSize} + body, err := io.ReadAll(limitedReader) + if err != nil { wh.getLogger(nil).Error(err, "unable to read the body from the incoming request") - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) + return + } + if limitedReader.N <= 0 { + err := fmt.Errorf("request entity is too large; limit is %d bytes", maxRequestSize) + wh.getLogger(nil).Error(err, "unable to read the body from the incoming request; limit reached") + wh.writeResponse(w, Errored(http.StatusRequestEntityTooLarge, err)) return } @@ -70,8 +93,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { err = fmt.Errorf("contentType=%s, expected application/json", contentType) wh.getLogger(nil).Error(err, "unable to process a request with unknown content type") - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) return } @@ -89,14 +111,12 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { _, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar) if err != nil { wh.getLogger(nil).Error(err, "unable to decode the request") - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) return } - wh.getLogger(&req).V(4).Info("received request") + wh.getLogger(&req).V(5).Info("received request") - reviewResponse = wh.Handle(ctx, req) - wh.writeResponseTyped(w, reviewResponse, actualAdmRevGVK) + wh.writeResponseTyped(w, wh.Handle(ctx, req), actualAdmRevGVK) } // writeResponse writes response to w generically, i.e. without encoding GVK information. 
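Looking back at the reconcile.go hunk above, the new ObjectReconciler and AsReconciler pair allows writing a typed reconciler that receives the fetched object directly. A minimal sketch of wiring one up through the builder; the ConfigMap type and the reconciler name are examples only.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// configMapReconciler gets the ConfigMap instead of a Request; the adapter
// performs the Get and drops not-found errors before calling Reconcile.
type configMapReconciler struct{}

func (r *configMapReconciler) Reconcile(ctx context.Context, cm *corev1.ConfigMap) (reconcile.Result, error) {
	// A zero Result with a nil error means done, no requeue; a non-nil error
	// would requeue with exponential backoff as documented in the hunk above.
	return reconcile.Result{}, nil
}

func setup(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.ConfigMap{}).
		Complete(reconcile.AsReconciler[*corev1.ConfigMap](mgr.GetClient(), &configMapReconciler{}))
}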
@@ -136,11 +156,11 @@ func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { } } else { res := ar.Response - if log := wh.getLogger(nil); log.V(4).Enabled() { + if log := wh.getLogger(nil); log.V(5).Enabled() { if res.Result != nil { log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason, "message", res.Result.Message) } - log.V(4).Info("wrote response", "requestID", res.UID, "allowed", res.Allowed) + log.V(5).Info("wrote response", "requestID", res.UID, "allowed", res.Allowed) } } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go index 00bda8a4ce..fa42217bd6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -33,6 +33,7 @@ type Warnings []string // Validator defines functions for validating an operation. // The custom resource kind which implements this interface can validate itself. // To validate the custom resource with another specific struct, use CustomValidator instead. +// Deprecated: Use CustomValidator instead. type Validator interface { runtime.Object @@ -53,6 +54,7 @@ type Validator interface { } // ValidatingWebhookFor creates a new Webhook for validating the provided type. +// Deprecated: Use WithCustomValidator instead. func ValidatingWebhookFor(scheme *runtime.Scheme, validator Validator) *Webhook { return &Webhook{ Handler: &validatingHandler{validator: validator, decoder: NewDecoder(scheme)}, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go index e99fbd8a85..07650aa60a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go @@ -30,7 +30,6 @@ import ( // CustomValidator defines functions for validating an operation. // The object to be validated is passed into methods as a parameter. type CustomValidator interface { - // ValidateCreate validates the object on creation. // The optional warnings will be added to the response as warning messages. // Return an error if the object is invalid. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go index 293137db49..e8439e2ea2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go @@ -24,9 +24,11 @@ import ( // define some aliases for common bits of the webhook functionality // Defaulter defines functions for setting defaults on resources. +// Deprecated: Use CustomDefaulter instead. type Defaulter = admission.Defaulter // Validator defines functions for validating an operation. +// Deprecated: Use CustomValidator instead. type Validator = admission.Validator // CustomDefaulter defines functions for setting defaults on resources. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index 23d5bf4350..f8820e8b7c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -77,37 +77,33 @@ type Options struct { // It will be defaulted to 9443 if unspecified. 
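The webhook Options hunk just below drops the deprecated TLSMinVersion string (the tlsVersion helper is removed further down), so a minimum TLS version is now set through TLSOpts instead. A brief sketch, with the chosen version purely illustrative.

package example

import (
	"crypto/tls"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// newWebhookServer enforces TLS 1.2+ via TLSOpts now that the
// TLSMinVersion option no longer exists.
func newWebhookServer() webhook.Server {
	return webhook.NewServer(webhook.Options{
		Port: 9443,
		TLSOpts: []func(*tls.Config){
			func(cfg *tls.Config) { cfg.MinVersion = tls.VersionTLS12 },
		},
	})
}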
Port int - // CertDir is the directory that contains the server key and certificate. The - // server key and certificate. + // CertDir is the directory that contains the server key and certificate. Defaults to + // /k8s-webhook-server/serving-certs. CertDir string // CertName is the server certificate name. Defaults to tls.crt. // - // Note: This option should only be set when TLSOpts does not override GetCertificate. + // Note: This option is only used when TLSOpts does not set GetCertificate. CertName string // KeyName is the server key name. Defaults to tls.key. // - // Note: This option should only be set when TLSOpts does not override GetCertificate. + // Note: This option is only used when TLSOpts does not set GetCertificate. KeyName string // ClientCAName is the CA certificate name which server used to verify remote(client)'s certificate. // Defaults to "", which means server does not verify client's certificate. ClientCAName string - // TLSVersion is the minimum version of TLS supported. Accepts - // "", "1.0", "1.1", "1.2" and "1.3" only ("" is equivalent to "1.0" for backwards compatibility) - // Deprecated: Use TLSOpts instead. - TLSMinVersion string - - // TLSOpts is used to allow configuring the TLS config used for the server + // TLSOpts is used to allow configuring the TLS config used for the server. + // This also allows providing a certificate via GetCertificate. TLSOpts []func(*tls.Config) // WebhookMux is the multiplexer that handles different webhooks. WebhookMux *http.ServeMux } -// NewServer constructs a new Server from the provided options. +// NewServer constructs a new webhook.Server from the provided options. func NewServer(o Options) Server { return &DefaultServer{ Options: o, @@ -187,42 +183,15 @@ func (s *DefaultServer) Register(path string, hook http.Handler) { regLog.Info("Registering webhook") } -// tlsVersion converts from human-readable TLS version (for example "1.1") -// to the values accepted by tls.Config (for example 0x301). -func tlsVersion(version string) (uint16, error) { - switch version { - // default is previous behaviour - case "": - return tls.VersionTLS10, nil - case "1.0": - return tls.VersionTLS10, nil - case "1.1": - return tls.VersionTLS11, nil - case "1.2": - return tls.VersionTLS12, nil - case "1.3": - return tls.VersionTLS13, nil - default: - return 0, fmt.Errorf("invalid TLSMinVersion %v: expects 1.0, 1.1, 1.2, 1.3 or empty", version) - } -} - // Start runs the server. // It will install the webhook related resources depend on the server configuration. func (s *DefaultServer) Start(ctx context.Context) error { s.defaultingOnce.Do(s.setDefaults) - baseHookLog := log.WithName("webhooks") - baseHookLog.Info("Starting webhook server") - - tlsMinVersion, err := tlsVersion(s.Options.TLSMinVersion) - if err != nil { - return err - } + log.Info("Starting webhook server") cfg := &tls.Config{ //nolint:gosec NextProtos: []string{"h2"}, - MinVersion: tlsMinVersion, } // fallback TLS config ready, will now mutate if passer wants full control over it for _, op := range s.Options.TLSOpts {