From 498eced5a398eac27603b663db5a40b98fae1add Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Fri, 25 Aug 2023 14:36:23 -0700 Subject: [PATCH 001/370] Recommend avoiding error constants Fixes https://github.com/crossplane/crossplane/issues/4514 See that issue for discussion and context. Signed-off-by: Nic Cope --- contributing/README.md | 133 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 132 insertions(+), 1 deletion(-) diff --git a/contributing/README.md b/contributing/README.md index bfe83a4bd..8b396696b 100644 --- a/contributing/README.md +++ b/contributing/README.md @@ -473,6 +473,136 @@ func example() error { } ``` +Previously we made heavy use of error constants, for example: + +```go +const errFetch = "could not fetch the thing" + +if err != nil { + return errors.Wrap(err, errFetch) +} +``` + +__We no longer recommend this pattern__. Instead, you should mostly create or +wrap errors with "inline" error strings. Refer to [#4514] for context. + +### Test Error Properties, not Error Strings + +We recommend using `cmpopts.EquateErrors` to test that your code returns the +expected error. This `cmp` option will consider one error that `errors.Is` +another to be equal to it. + +When testing a simple function with few error cases it's usually sufficient to +test simply whether or not an error was returned. You can use `cmpopts.AnyError` +for this. We prefer `cmpopts.AnyError` to a simple `err == nil` test because it +keeps our tests consistent. This way it's easy to mix and match tests that check +for `cmpopts.AnyError` with tests that check for a more specific error in the +same test table. + +For example: + +```go +func TestQuack(t *testing.T) { + type want struct { + output string + err error + } + + // We only care that Quack returns an error when supplied with a bad + // input, and returns no error when supplied with good input. + cases := map[string]struct{ + input string + want want + }{ + "BadInput": { + input: "Hello!", + want: want{ + err: cmpopts.AnyError, + }, + }, + "GoodInput": { + input: "Quack!", + want: want{ + output: "Quack!", + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got, err := Quack(tc.input) + + if diff := cmp.Diff(got, tc.want.output); diff != "" { + t.Errorf("Quack(): -got, +want:\n%s", diff) + } + + if diff := cmp.Diff(err, tc.want.err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("Quack(): -got, +want:\n%s", diff) + } + }) + } +} +``` + +For more complex functions with many error cases (like `Reconciler` methods) +consider injecting dependencies that you can make return a specific sentinel +error. This way you're able to test that you got the error you'd expect given a +particular set of inputs and dependency behaviors, not another unexpected error. +For example: + +```go +func TestComplicatedQuacker(t *testing.T) { + // We'll inject this error and test we return an error that errors.Is + // (i.e. wraps) it. + errBoom := errors.New("boom") + + type want struct { + output string + err error + } + + cases := map[string]struct{ + q Quacker + input string + want want + }{ + "BadQuackModulator": { + q: &ComplicatedQuacker{ + DuckIdentifer: func() (Duck, error) { + return &MockDuck{}, nil + }, + QuackModulator: func() (int, error) { + // QuackModulator returns our sentinel + // error. + return 0, errBoom + } + }, + input: "Hello!", + want: want{ + // We want an error that errors.Is (i.e. wraps) + // our sentinel error. We don't test what error + // message it was wrapped with. 
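+				// (cmpopts.EquateErrors treats the returned error
+				// as equal to want.err when errors.Is reports that
+				// one wraps the other.)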
+ err: errBoom, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got, err := tc.q.Quack(tc.input) + + if diff := cmp.Diff(got, tc.want.output); diff != "" { + t.Errorf("q.Quack(): -got, +want:\n%s", diff) + } + + if diff := cmp.Diff(err, tc.want.err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("q.Quack(): -got, +want:\n%s", diff) + } + }) + } +} +``` + ### Scope Errors Where possible, keep errors as narrowly scoped as possible. This avoids bugs @@ -678,7 +808,7 @@ func TestExample(t *testing.T) { // even for simple comparisons to keep test output // consistent. Some Crossplane specific cmp options can // be found in crossplane-runtime/pkg/test. - if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { t.Errorf("%s\nExample(...): -want, +got:\n%s", tc.reason, diff) } @@ -765,3 +895,4 @@ make run [CODEOWNERS]: ../CODEOWNERS [Reviewers]: ../OWNERS.md#reviewers [Maintainers]: ../OWNERS.md#maintainers +[#4514]: https://github.com/crossplane/crossplane/issues/4514 \ No newline at end of file From 5b27f068890f9b853e74630952173de879edd181 Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Mon, 12 Feb 2024 18:24:38 -0800 Subject: [PATCH 002/370] Reorganize the growing set of badges in the main README.md Signed-off-by: Jared Watts --- README.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 62b4396e6..fe64f2ca1 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ - -![CI](https://github.com/crossplane/crossplane/workflows/CI/badge.svg) [![GitHub release](https://img.shields.io/github/release/crossplane/crossplane/all.svg)](https://github.com/crossplane/crossplane/releases) [![Docker Pulls](https://img.shields.io/docker/pulls/crossplane/crossplane.svg)](https://hub.docker.com/r/crossplane/crossplane) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/3260/badge)](https://www.bestpractices.dev/projects/3260) [![Go Report Card](https://goreportcard.com/badge/github.com/crossplane/crossplane)](https://goreportcard.com/report/github.com/crossplane/crossplane) [![Slack](https://img.shields.io/badge/slack-crossplane-red?logo=slack)](https://slack.crossplane.io) [![Twitter Follow](https://img.shields.io/twitter/follow/crossplane_io?logo=X&label=Follow&style=flat)](https://twitter.com/intent/follow?screen_name=crossplane_io&user_id=788180534543339520) [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/crossplane)](https://artifacthub.io/packages/helm/crossplane/crossplane) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/3260/badge)](https://www.bestpractices.dev/projects/3260) ![CI](https://github.com/crossplane/crossplane/workflows/CI/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/crossplane/crossplane)](https://goreportcard.com/report/github.com/crossplane/crossplane) ![Crossplane](banner.png) @@ -17,6 +16,8 @@ Crossplane's [Get Started Docs] cover install and cloud provider quickstarts. ## Releases +[![GitHub release](https://img.shields.io/github/release/crossplane/crossplane/all.svg)](https://github.com/crossplane/crossplane/releases) [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/crossplane)](https://artifacthub.io/packages/helm/crossplane/crossplane) + Currently maintained releases, as well as the next few upcoming releases are listed below. 
For more information take a look at the Crossplane [release cycle documentation]. @@ -48,6 +49,8 @@ delivery timeline. ## Get Involved +[![Slack](https://img.shields.io/badge/slack-crossplane-red?logo=slack)](https://slack.crossplane.io) [![Twitter Follow](https://img.shields.io/twitter/follow/crossplane_io?logo=X&label=Follow&style=flat)](https://twitter.com/intent/follow?screen_name=crossplane_io&user_id=788180534543339520) [![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UC19FgzMBMqBro361HbE46Fw)](https://www.youtube.com/@Crossplane) + Crossplane is a community driven project; we welcome your contribution. To file a bug, suggest an improvement, or request a new feature please open an [issue against Crossplane] or the relevant provider. Refer to our [contributing guide] From 002c3ad0d1e930426472339255ef434e4ad081db Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 08:38:29 +0000 Subject: [PATCH 003/370] chore(deps): update golangci/golangci-lint-action action to v4 --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bde814d71..c7506d916 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -115,7 +115,7 @@ jobs: # this action because it leaves 'annotations' (i.e. it comments on PRs to # point out linter violations). - name: Lint - uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3 + uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4 with: version: ${{ env.GOLANGCI_VERSION }} skip-cache: true # We do our own caching. From 136ad1a6b433a7fe6114586f0f7ece3a7dfa360b Mon Sep 17 00:00:00 2001 From: lsviben Date: Tue, 13 Feb 2024 16:00:35 +0100 Subject: [PATCH 004/370] improve render debug logs style Signed-off-by: lsviben --- cmd/crank/beta/render/cmd.go | 4 ++-- cmd/crank/beta/render/render.go | 8 ++++---- cmd/crank/beta/render/runtime.go | 8 ++++---- cmd/crank/beta/render/runtime_development.go | 12 ++++++++---- cmd/crank/beta/render/runtime_docker.go | 18 +++++++++++------- cmd/crank/beta/render/runtime_docker_test.go | 6 ++++-- 6 files changed, 33 insertions(+), 23 deletions(-) diff --git a/cmd/crank/beta/render/cmd.go b/cmd/crank/beta/render/cmd.go index 0b93450a1..05c19144f 100644 --- a/cmd/crank/beta/render/cmd.go +++ b/cmd/crank/beta/render/cmd.go @@ -119,7 +119,7 @@ func (c *Cmd) AfterApply() error { } // Run render. -func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyclo // Only a touch over. +func (c *Cmd) Run(k *kong.Context, log logging.Logger) error { //nolint:gocyclo // Only a touch over. 
xr, err := LoadCompositeResource(c.fs, c.CompositeResource) if err != nil { return errors.Wrapf(err, "cannot load composite resource from %q", c.CompositeResource) @@ -180,7 +180,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc ctx, cancel := context.WithTimeout(context.Background(), c.Timeout) defer cancel() - out, err := Render(ctx, logger, Inputs{ + out, err := Render(ctx, log, Inputs{ CompositeResource: xr, Composition: comp, Functions: fns, diff --git a/cmd/crank/beta/render/render.go b/cmd/crank/beta/render/render.go index f553a3ab3..1cd88b4ac 100644 --- a/cmd/crank/beta/render/render.go +++ b/cmd/crank/beta/render/render.go @@ -89,21 +89,21 @@ type Outputs struct { } // Render the desired XR and composed resources, sorted by resource name, given the supplied inputs. -func Render(ctx context.Context, logger logging.Logger, in Inputs) (Outputs, error) { //nolint:gocyclo // TODO(negz): Should we refactor to break this up a bit? +func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) { //nolint:gocyclo // TODO(negz): Should we refactor to break this up a bit? // Run our Functions. conns := map[string]*grpc.ClientConn{} for _, fn := range in.Functions { - runtime, err := GetRuntime(fn) + runtime, err := GetRuntime(fn, log) if err != nil { return Outputs{}, errors.Wrapf(err, "cannot get runtime for Function %q", fn.GetName()) } - rctx, err := runtime.Start(ctx, logger) + rctx, err := runtime.Start(ctx) if err != nil { return Outputs{}, errors.Wrapf(err, "cannot start Function %q", fn.GetName()) } defer func() { if err := rctx.Stop(ctx); err != nil { - logger.Debug("Error stopping function runtime", "function", fn.GetName(), "error", err) + log.Debug("Error stopping function runtime", "function", fn.GetName(), "error", err) } }() diff --git a/cmd/crank/beta/render/runtime.go b/cmd/crank/beta/render/runtime.go index e8f1ac789..f83f23d0a 100644 --- a/cmd/crank/beta/render/runtime.go +++ b/cmd/crank/beta/render/runtime.go @@ -50,7 +50,7 @@ const ( // A Runtime runs a Function. type Runtime interface { // Start the Function. - Start(ctx context.Context, logger logging.Logger) (RuntimeContext, error) + Start(ctx context.Context) (RuntimeContext, error) } // RuntimeContext contains context on how a Function is being run. @@ -63,12 +63,12 @@ type RuntimeContext struct { } // GetRuntime for the supplied Function, per its annotations. -func GetRuntime(fn pkgv1beta1.Function) (Runtime, error) { +func GetRuntime(fn pkgv1beta1.Function, log logging.Logger) (Runtime, error) { switch r := RuntimeType(fn.GetAnnotations()[AnnotationKeyRuntime]); r { case AnnotationValueRuntimeDocker, "": - return GetRuntimeDocker(fn) + return GetRuntimeDocker(fn, log) case AnnotationValueRuntimeDevelopment: - return GetRuntimeDevelopment(fn), nil + return GetRuntimeDevelopment(fn, log), nil default: return nil, errors.Errorf("unsupported %q annotation value %q (unknown runtime)", AnnotationKeyRuntime, r) } diff --git a/cmd/crank/beta/render/runtime_development.go b/cmd/crank/beta/render/runtime_development.go index ec1476397..8df9eb59f 100644 --- a/cmd/crank/beta/render/runtime_development.go +++ b/cmd/crank/beta/render/runtime_development.go @@ -37,14 +37,18 @@ type RuntimeDevelopment struct { // Target is the gRPC target for the running function, for example // localhost:9443. Target string + // Function is the name of the function to be run. Function string + + // log is the logger for this runtime. 
+ log logging.Logger } // GetRuntimeDevelopment extracts RuntimeDevelopment configuration from the // supplied Function. -func GetRuntimeDevelopment(fn pkgv1beta1.Function) *RuntimeDevelopment { - r := &RuntimeDevelopment{Target: "localhost:9443", Function: fn.GetName()} +func GetRuntimeDevelopment(fn pkgv1beta1.Function, log logging.Logger) *RuntimeDevelopment { + r := &RuntimeDevelopment{Target: "localhost:9443", Function: fn.GetName(), log: log} if t := fn.GetAnnotations()[AnnotationKeyRuntimeDevelopmentTarget]; t != "" { r.Target = t } @@ -54,7 +58,7 @@ func GetRuntimeDevelopment(fn pkgv1beta1.Function) *RuntimeDevelopment { var _ Runtime = &RuntimeDevelopment{} // Start does nothing. It returns a Stop function that also does nothing. -func (r *RuntimeDevelopment) Start(_ context.Context, logger logging.Logger) (RuntimeContext, error) { - logger.Debug("Starting development runtime. Remember to run the function manually.", "function", r.Function, "target", r.Target) +func (r *RuntimeDevelopment) Start(_ context.Context) (RuntimeContext, error) { + r.log.Debug("Starting development runtime. Remember to run the function manually.", "function", r.Function, "target", r.Target) return RuntimeContext{Target: r.Target, Stop: func(_ context.Context) error { return nil }}, nil } diff --git a/cmd/crank/beta/render/runtime_docker.go b/cmd/crank/beta/render/runtime_docker.go index fde36cd58..cb07e1832 100644 --- a/cmd/crank/beta/render/runtime_docker.go +++ b/cmd/crank/beta/render/runtime_docker.go @@ -95,6 +95,9 @@ type RuntimeDocker struct { // PullPolicy controls how the runtime image is pulled. PullPolicy DockerPullPolicy + + // log is the logger for this runtime. + log logging.Logger } // GetDockerPullPolicy extracts PullPolicy configuration from the supplied @@ -124,7 +127,7 @@ func GetDockerCleanup(fn pkgv1beta1.Function) (DockerCleanup, error) { // GetRuntimeDocker extracts RuntimeDocker configuration from the supplied // Function. -func GetRuntimeDocker(fn pkgv1beta1.Function) (*RuntimeDocker, error) { +func GetRuntimeDocker(fn pkgv1beta1.Function, log logging.Logger) (*RuntimeDocker, error) { cleanup, err := GetDockerCleanup(fn) if err != nil { return nil, errors.Wrapf(err, "cannot get cleanup policy for Function %q", fn.GetName()) @@ -140,6 +143,7 @@ func GetRuntimeDocker(fn pkgv1beta1.Function) (*RuntimeDocker, error) { Image: fn.Spec.Package, Stop: cleanup == AnnotationValueRuntimeDockerCleanupStop, PullPolicy: pullPolicy, + log: log, } if i := fn.GetAnnotations()[AnnotationKeyRuntimeDockerImage]; i != "" { r.Image = i @@ -150,8 +154,8 @@ func GetRuntimeDocker(fn pkgv1beta1.Function) (*RuntimeDocker, error) { var _ Runtime = &RuntimeDocker{} // Start a Function as a Docker container. -func (r *RuntimeDocker) Start(ctx context.Context, logger logging.Logger) (RuntimeContext, error) { //nolint:gocyclo // TODO(phisco): Refactor to break this up a bit, not so easy. - logger.Debug("Starting Docker container runtime", "image", r.Image) +func (r *RuntimeDocker) Start(ctx context.Context) (RuntimeContext, error) { //nolint:gocyclo // TODO(phisco): Refactor to break this up a bit, not so easy. 
+ r.log.Debug("Starting Docker container runtime", "image", r.Image) c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) if err != nil { return RuntimeContext{}, errors.Wrap(err, "cannot create Docker client using environment variables") @@ -182,7 +186,7 @@ func (r *RuntimeDocker) Start(ctx context.Context, logger logging.Logger) (Runti } if r.PullPolicy == AnnotationValueRuntimeDockerPullPolicyAlways { - logger.Debug("Pulling image with pullPolicy: Always", "image", r.Image) + r.log.Debug("Pulling image with pullPolicy: Always", "image", r.Image) err = PullImage(ctx, c, r.Image) if err != nil { return RuntimeContext{}, errors.Wrapf(err, "cannot pull Docker image %q", r.Image) @@ -190,7 +194,7 @@ func (r *RuntimeDocker) Start(ctx context.Context, logger logging.Logger) (Runti } // TODO(negz): Set a container name? Presumably unique across runs. - logger.Debug("Creating Docker container", "image", r.Image, "address", addr) + r.log.Debug("Creating Docker container", "image", r.Image, "address", addr) rsp, err := c.ContainerCreate(ctx, cfg, hcfg, nil, nil, "") if err != nil { if !errdefs.IsNotFound(err) || r.PullPolicy == AnnotationValueRuntimeDockerPullPolicyNever { @@ -198,7 +202,7 @@ func (r *RuntimeDocker) Start(ctx context.Context, logger logging.Logger) (Runti } // The image was not found, but we're allowed to pull it. - logger.Debug("Image not found, pulling", "image", r.Image) + r.log.Debug("Image not found, pulling", "image", r.Image) err = PullImage(ctx, c, r.Image) if err != nil { return RuntimeContext{}, errors.Wrapf(err, "cannot pull Docker image %q", r.Image) @@ -215,7 +219,7 @@ func (r *RuntimeDocker) Start(ctx context.Context, logger logging.Logger) (Runti } stop := func(_ context.Context) error { - logger.Debug("Container left running", "container", rsp.ID, "image", r.Image) + r.log.Debug("Container left running", "container", rsp.ID, "image", r.Image) return nil } if r.Stop { diff --git a/cmd/crank/beta/render/runtime_docker_test.go b/cmd/crank/beta/render/runtime_docker_test.go index 5646a0d7c..4554a5b70 100644 --- a/cmd/crank/beta/render/runtime_docker_test.go +++ b/cmd/crank/beta/render/runtime_docker_test.go @@ -26,6 +26,8 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/crossplane/crossplane-runtime/pkg/logging" + v1 "github.com/crossplane/crossplane/apis/pkg/v1" "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) @@ -145,8 +147,8 @@ func TestGetRuntimeDocker(t *testing.T) { } for name, tc := range cases { t.Run(name, func(t *testing.T) { - rd, err := GetRuntimeDocker(tc.args.fn) - if diff := cmp.Diff(tc.want.rd, rd); diff != "" { + rd, err := GetRuntimeDocker(tc.args.fn, logging.NewNopLogger()) + if diff := cmp.Diff(tc.want.rd, rd, cmpopts.IgnoreUnexported(RuntimeDocker{})); diff != "" { t.Errorf("\n%s\nGetRuntimeDocker(...): -want, +got:\n%s", tc.reason, diff) } if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { From 1b58adf55603e1105305b6a802bb6058fae9acac Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 13 Feb 2024 09:39:17 -0800 Subject: [PATCH 005/370] Bump golangci-lint Most of this diff is just renaming unused arguments to _. I believe golint used to enforce this. It was then abandoned, and we ended up with a mix of using _ and not using it. The revive linter now catches this, so we can be consistent. 
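For example, mock function literals that never use their arguments now read

    ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error {
        return errBoom
    }),

rather than naming parameters like ctx and cd that are never referenced.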
Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 2 +- Makefile | 2 +- cmd/crank/beta/render/render_test.go | 2 +- cmd/crank/beta/render/runtime_docker_test.go | 2 +- .../internal/resource/xpkg/client_test.go | 2 +- .../apiextensions/claim/fuzz_test.go | 4 +- .../apiextensions/claim/reconciler_test.go | 44 ++++----- .../apiextensions/claim/syncer_csa_test.go | 10 +-- .../apiextensions/claim/syncer_ssa_test.go | 14 +-- .../apiextensions/composite/api_test.go | 8 +- .../composite/composition_functions_test.go | 90 +++++++++---------- .../composite/composition_pt_test.go | 70 +++++++-------- .../composite/composition_render_test.go | 2 +- .../composite/connection_test.go | 8 +- .../composite/environment_fetcher_test.go | 2 +- .../composite/environment_selector_test.go | 2 +- .../apiextensions/composite/fuzz_test.go | 4 +- .../composite/reconciler_test.go | 74 +++++++-------- .../apiextensions/composition/fuzz_test.go | 2 +- .../composition/reconciler_test.go | 4 +- .../definition/reconciler_test.go | 30 +++---- .../apiextensions/offered/reconciler_test.go | 4 +- .../apiextensions/usage/reconciler.go | 2 +- .../apiextensions/usage/reconciler_test.go | 48 +++++----- .../apiextensions/usage/selector_test.go | 22 ++--- .../controller/pkg/manager/reconciler_test.go | 2 +- .../pkg/resolver/reconciler_test.go | 16 ++-- .../pkg/revision/dependency_test.go | 14 +-- .../pkg/revision/establisher_test.go | 30 +++---- internal/controller/pkg/revision/fuzz_test.go | 8 +- .../pkg/revision/reconciler_test.go | 30 +++---- .../pkg/revision/runtime_function_test.go | 70 +++++++-------- .../pkg/revision/runtime_provider_test.go | 70 +++++++-------- .../rbac/definition/reconciler_test.go | 2 +- .../controller/rbac/namespace/fuzz_test.go | 2 +- .../rbac/namespace/reconciler_test.go | 2 +- .../rbac/provider/roles/fuzz_test.go | 2 +- .../rbac/provider/roles/reconciler_test.go | 6 +- internal/dag/fuzz_test.go | 2 +- .../deployment_runtime_config_test.go | 6 +- internal/initializer/installer_test.go | 14 +-- internal/initializer/lock_test.go | 2 +- internal/initializer/store_config_test.go | 6 +- internal/initializer/tls_test.go | 52 +++++------ internal/initializer/waiter_test.go | 6 +- internal/names/generate_test.go | 2 +- internal/usage/handler_test.go | 10 +-- internal/xcrd/fuzz_test.go | 4 +- internal/xpkg/build.go | 2 +- internal/xpkg/fake/mocks.go | 2 +- internal/xpkg/upbound/resolver.go | 2 +- test/e2e/funcs/env.go | 2 +- test/e2e/funcs/feature.go | 2 +- 53 files changed, 411 insertions(+), 411 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bde814d71..11b17fe8f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,7 +11,7 @@ on: env: # Common versions GO_VERSION: '1.22.0' - GOLANGCI_VERSION: 'v1.55.2' + GOLANGCI_VERSION: 'v1.56.2' DOCKER_BUILDX_VERSION: 'v0.10.0' # Common users. 
We can't run a step 'if secrets.AWS_USR != ""' but we can run diff --git a/Makefile b/Makefile index b4c6d4aeb..bb1c3ad18 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ GO_TEST_PACKAGES = $(GO_PROJECT)/test/e2e GO_LDFLAGS += -X $(GO_PROJECT)/internal/version.version=$(VERSION) GO_SUBDIRS += cmd internal apis GO111MODULE = on -GOLANGCILINT_VERSION = 1.55.2 +GOLANGCILINT_VERSION = 1.56.2 GO_LINT_ARGS ?= "--fix" -include build/makelib/golang.mk diff --git a/cmd/crank/beta/render/render_test.go b/cmd/crank/beta/render/render_test.go index 72150e227..499da3107 100644 --- a/cmd/crank/beta/render/render_test.go +++ b/cmd/crank/beta/render/render_test.go @@ -546,7 +546,7 @@ func TestRender(t *testing.T) { Functions: []pkgv1beta1.Function{ func() pkgv1beta1.Function { i := 0 - lis := NewFunctionWithRunFunc(t, func(ctx context.Context, request *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) { + lis := NewFunctionWithRunFunc(t, func(_ context.Context, request *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) { defer func() { i++ }() switch i { case 0: diff --git a/cmd/crank/beta/render/runtime_docker_test.go b/cmd/crank/beta/render/runtime_docker_test.go index 4554a5b70..467593369 100644 --- a/cmd/crank/beta/render/runtime_docker_test.go +++ b/cmd/crank/beta/render/runtime_docker_test.go @@ -33,7 +33,7 @@ import ( ) type mockPullClient struct { - MockPullImage func(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + MockPullImage func(_ context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) } func (m *mockPullClient) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { diff --git a/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go b/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go index 725c1a620..e697e9ee0 100644 --- a/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go +++ b/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go @@ -145,7 +145,7 @@ func TestGetDependencyRef(t *testing.T) { pkgType: v1beta1.FunctionPackageType, pkg: "example.com/function-1:v1.0.0", client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + MockGet: test.NewMockGetFn(nil, func(_ client.Object) error { return kerrors.NewNotFound(schema.GroupResource{}, "whatever") }), }, diff --git a/internal/controller/apiextensions/claim/fuzz_test.go b/internal/controller/apiextensions/claim/fuzz_test.go index 8a62cc088..411094bfb 100644 --- a/internal/controller/apiextensions/claim/fuzz_test.go +++ b/internal/controller/apiextensions/claim/fuzz_test.go @@ -31,7 +31,7 @@ import ( ) func FuzzPropagateConnection(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { f := fuzz.NewConsumer(data) cp := &fake.Composite{} cm := &fake.CompositeClaim{} @@ -62,7 +62,7 @@ func FuzzPropagateConnection(f *testing.F) { return nil }), }, - Applicator: resource.ApplyFn(func(_ context.Context, o client.Object, _ ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { return nil }), } diff --git a/internal/controller/apiextensions/claim/reconciler_test.go b/internal/controller/apiextensions/claim/reconciler_test.go index 6d0da73b5..904ff2604 100644 --- a/internal/controller/apiextensions/claim/reconciler_test.go +++ b/internal/controller/apiextensions/claim/reconciler_test.go @@ -132,10 +132,10 
@@ func TestReconcile(t *testing.T) { })), }), WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), - WithCompositeSyncer(CompositeSyncerFn(func(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { return nil })), - WithConnectionPropagator(ConnectionPropagatorFn(func(ctx context.Context, to resource.LocalConnectionSecretOwner, from resource.ConnectionSecretOwner) (propagated bool, err error) { + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return nil })), + WithConnectionPropagator(ConnectionPropagatorFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ resource.ConnectionSecretOwner) (propagated bool, err error) { return true, nil })), }, @@ -235,7 +235,7 @@ func TestReconcile(t *testing.T) { })), }), WithClaimFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), }, }, @@ -261,7 +261,7 @@ func TestReconcile(t *testing.T) { cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteCDs))) })), }), - WithConnectionUnpublisher(ConnectionUnpublisherFn(func(ctx context.Context, so resource.LocalConnectionSecretOwner, c managed.ConnectionDetails) error { + WithConnectionUnpublisher(ConnectionUnpublisherFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ managed.ConnectionDetails) error { return errBoom })), }, @@ -289,7 +289,7 @@ func TestReconcile(t *testing.T) { })), }), WithClaimFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { return errBoom }, + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }, }), }, }, @@ -316,7 +316,7 @@ func TestReconcile(t *testing.T) { })), }), WithClaimFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), }, }, @@ -350,7 +350,7 @@ func TestReconcile(t *testing.T) { MockDelete: test.NewMockDeleteFn(nil), }), WithClaimFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), }, }, @@ -385,7 +385,7 @@ func TestReconcile(t *testing.T) { }), }), WithClaimFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), }, }, @@ -406,7 +406,7 @@ func TestReconcile(t *testing.T) { })), }), WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return errBoom }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }, }), }, }, @@ -427,9 +427,9 @@ func TestReconcile(t *testing.T) { })), }), WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), - WithCompositeSyncer(CompositeSyncerFn(func(ctx context.Context, cm 
*claim.Unstructured, xr *composite.Unstructured) error { return errBoom })), + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return errBoom })), }, }, want: want{ @@ -465,9 +465,9 @@ func TestReconcile(t *testing.T) { })), }), WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), - WithCompositeSyncer(CompositeSyncerFn(func(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { return nil })), + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return nil })), }, }, want: want{ @@ -501,10 +501,10 @@ func TestReconcile(t *testing.T) { })), }), WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), - WithCompositeSyncer(CompositeSyncerFn(func(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { return nil })), - WithConnectionPropagator(ConnectionPropagatorFn(func(ctx context.Context, to resource.LocalConnectionSecretOwner, from resource.ConnectionSecretOwner) (propagated bool, err error) { + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return nil })), + WithConnectionPropagator(ConnectionPropagatorFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ resource.ConnectionSecretOwner) (propagated bool, err error) { return false, errBoom })), }, @@ -542,10 +542,10 @@ func TestReconcile(t *testing.T) { })), }), WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), - WithCompositeSyncer(CompositeSyncerFn(func(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { return nil })), - WithConnectionPropagator(ConnectionPropagatorFn(func(ctx context.Context, to resource.LocalConnectionSecretOwner, from resource.ConnectionSecretOwner) (propagated bool, err error) { + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return nil })), + WithConnectionPropagator(ConnectionPropagatorFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ resource.ConnectionSecretOwner) (propagated bool, err error) { return true, nil })), }, @@ -582,8 +582,8 @@ func NewClaim(m ...ClaimModifier) *claim.Unstructured { } // A status update function that ensures the supplied object is the claim we want. -func WantClaim(t *testing.T, want *claim.Unstructured) func(ctx context.Context, obj client.Object, _ ...client.SubResourceUpdateOption) error { - return func(ctx context.Context, got client.Object, _ ...client.SubResourceUpdateOption) error { +func WantClaim(t *testing.T, want *claim.Unstructured) func(_ context.Context, obj client.Object, _ ...client.SubResourceUpdateOption) error { + return func(_ context.Context, got client.Object, _ ...client.SubResourceUpdateOption) error { // Normally we use a custom Equal method on conditions to ignore the // lastTransitionTime, but we're using unstructured types here where // the conditions are just a map[string]any. 
diff --git a/internal/controller/apiextensions/claim/syncer_csa_test.go b/internal/controller/apiextensions/claim/syncer_csa_test.go index 5c88eadd4..863caff2a 100644 --- a/internal/controller/apiextensions/claim/syncer_csa_test.go +++ b/internal/controller/apiextensions/claim/syncer_csa_test.go @@ -88,7 +88,7 @@ func TestClientSideSync(t *testing.T) { "GenerateXRNameError": { reason: "We should return an error if we can't generate an XR name.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return errBoom }), }, @@ -132,7 +132,7 @@ func TestClientSideSync(t *testing.T) { "UpdateClaimResourceRefError": { reason: "We should return an error if we can't update the claim to persist its resourceRef.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { cd.SetName("cool-claim-random") return nil }), @@ -184,7 +184,7 @@ func TestClientSideSync(t *testing.T) { "ApplyXRError": { reason: "We should return an error if we can't apply the XR.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { cd.SetName("cool-claim-random") return nil }), @@ -239,7 +239,7 @@ func TestClientSideSync(t *testing.T) { "UpdateClaimStatusError": { reason: "We should return an error if we can't update the claim's status.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { cd.SetName("cool-claim-random") return nil }), @@ -296,7 +296,7 @@ func TestClientSideSync(t *testing.T) { "XRDoesNotExist": { reason: "We should create, bind, and sync with an XR when none exists.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { cd.SetName("cool-claim-random") return nil }), diff --git a/internal/controller/apiextensions/claim/syncer_ssa_test.go b/internal/controller/apiextensions/claim/syncer_ssa_test.go index 31ea68736..1d67e9584 100644 --- a/internal/controller/apiextensions/claim/syncer_ssa_test.go +++ b/internal/controller/apiextensions/claim/syncer_ssa_test.go @@ -65,7 +65,7 @@ func TestServerSideSync(t *testing.T) { "GenerateXRNameError": { reason: "We should return an error if we can't generate an XR name.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return errBoom }), }, @@ -88,7 +88,7 @@ func TestServerSideSync(t *testing.T) { "WeirdClaimSpec": { reason: "We should return an error if the claim spec is not an object.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil }), }, @@ -117,7 +117,7 @@ func TestServerSideSync(t *testing.T) { // Fail to update the claim. 
MockUpdate: test.NewMockUpdateFn(errBoom), }, - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil }), }, @@ -170,7 +170,7 @@ func TestServerSideSync(t *testing.T) { // Fail to patch the XR. MockPatch: test.NewMockPatchFn(errBoom), }, - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil }), }, @@ -228,7 +228,7 @@ func TestServerSideSync(t *testing.T) { return nil }), }, - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil }), }, @@ -285,7 +285,7 @@ func TestServerSideSync(t *testing.T) { // Fail to update the claim's status. MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), }, - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil }), }, @@ -345,7 +345,7 @@ func TestServerSideSync(t *testing.T) { // Update the claim's status. MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), }, - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { // Generate a name for the XR. cd.SetName("cool-claim-random") return nil diff --git a/internal/controller/apiextensions/composite/api_test.go b/internal/controller/apiextensions/composite/api_test.go index 850ddf3de..f2ff858ea 100644 --- a/internal/controller/apiextensions/composite/api_test.go +++ b/internal/controller/apiextensions/composite/api_test.go @@ -323,7 +323,7 @@ func TestFetchRevision(t *testing.T) { }), }, // This should not be called. - Applicator: resource.ApplyFn(func(c context.Context, o client.Object, ao ...resource.ApplyOption) error { return errBoom }), + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { return errBoom }), }, args: args{ cr: &fake.Composite{ @@ -359,7 +359,7 @@ func TestFetchRevision(t *testing.T) { return nil }), }, - Applicator: resource.ApplyFn(func(c context.Context, o client.Object, ao ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(_ context.Context, o client.Object, _ ...resource.ApplyOption) error { // Ensure we were updated to reference the latest CompositionRevision. want := &fake.Composite{ CompositionReferencer: fake.CompositionReferencer{ @@ -416,7 +416,7 @@ func TestFetchRevision(t *testing.T) { return nil }), }, - Applicator: resource.ApplyFn(func(c context.Context, o client.Object, ao ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(_ context.Context, o client.Object, _ ...resource.ApplyOption) error { // Ensure we were updated to reference the latest CompositionRevision. 
want := &fake.Composite{ CompositionReferencer: fake.CompositionReferencer{ @@ -474,7 +474,7 @@ func TestFetchRevision(t *testing.T) { return nil }), }, - Applicator: resource.ApplyFn(func(c context.Context, o client.Object, ao ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { return errBoom }), }, diff --git a/internal/controller/apiextensions/composite/composition_functions_test.go b/internal/controller/apiextensions/composite/composition_functions_test.go index 513f2e399..8a9ac5274 100644 --- a/internal/controller/apiextensions/composite/composition_functions_test.go +++ b/internal/controller/apiextensions/composite/composition_functions_test.go @@ -80,10 +80,10 @@ func TestFunctionCompose(t *testing.T) { reason: "We should return any error encountered while fetching the XR's connection details.", params: params{ o: []FunctionComposerOption{ - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return ComposedResourceStates{}, nil })), - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, errBoom })), }, @@ -100,10 +100,10 @@ func TestFunctionCompose(t *testing.T) { reason: "We should return any error encountered while getting the XR's existing composed resources.", params: params{ o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, errBoom })), }, @@ -120,10 +120,10 @@ func TestFunctionCompose(t *testing.T) { reason: "We should return any error encountered while unmarshalling a Composition Function input", params: params{ o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), }, @@ -151,14 +151,14 @@ func TestFunctionCompose(t *testing.T) { "RunFunctionError": { reason: "We should return any error encountered while running a Composition Function", params: params{ - r: 
FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { return nil, errBoom }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), }, @@ -185,7 +185,7 @@ func TestFunctionCompose(t *testing.T) { "FatalFunctionResultError": { reason: "We should return any fatal function results as an error", params: params{ - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { r := &v1beta1.Result{ Severity: v1beta1.Severity_SEVERITY_FATAL, Message: "oh no", @@ -193,10 +193,10 @@ func TestFunctionCompose(t *testing.T) { return &v1beta1.RunFunctionResponse{Results: []*v1beta1.Result{r}}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), }, @@ -223,7 +223,7 @@ func TestFunctionCompose(t *testing.T) { "RenderComposedResourceMetadataError": { reason: "We should return any error we encounter when rendering composed resource metadata", params: params{ - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { d := &v1beta1.State{ Resources: map[string]*v1beta1.Resource{ "cool-resource": { @@ -237,10 +237,10 @@ func TestFunctionCompose(t *testing.T) { return &v1beta1.RunFunctionResponse{Desired: d}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + 
WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), }, @@ -271,7 +271,7 @@ func TestFunctionCompose(t *testing.T) { kube: &test.MockClient{ MockGet: test.NewMockGetFn(errBoom), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { d := &v1beta1.State{ Resources: map[string]*v1beta1.Resource{ "cool-resource": { @@ -287,10 +287,10 @@ func TestFunctionCompose(t *testing.T) { return &v1beta1.RunFunctionResponse{Desired: d}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), }, @@ -320,17 +320,17 @@ func TestFunctionCompose(t *testing.T) { kube: &test.MockClient{ MockPatch: test.NewMockPatchFn(nil), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { return &v1beta1.RunFunctionResponse{}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { + WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { return errBoom })), }, @@ -367,17 +367,17 @@ func TestFunctionCompose(t *testing.T) { return errBoom }), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { return &v1beta1.RunFunctionResponse{}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + 
WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { + WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { return nil })), }, @@ -408,7 +408,7 @@ func TestFunctionCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil), MockStatusPatch: test.NewMockSubResourcePatchFn(errBoom), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { d := &v1beta1.State{ Composite: &v1beta1.Resource{ Resource: MustStruct(map[string]any{ @@ -420,13 +420,13 @@ func TestFunctionCompose(t *testing.T) { return &v1beta1.RunFunctionResponse{Desired: d}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { + WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { return nil })), }, @@ -466,7 +466,7 @@ func TestFunctionCompose(t *testing.T) { }), MockStatusPatch: test.NewMockSubResourcePatchFn(nil), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { d := &v1beta1.State{ Resources: map[string]*v1beta1.Resource{ "uncool-resource": { @@ -480,13 +480,13 @@ func TestFunctionCompose(t *testing.T) { return &v1beta1.RunFunctionResponse{Desired: d}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr 
resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { + WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { return nil })), }, @@ -518,7 +518,7 @@ func TestFunctionCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil), MockStatusPatch: test.NewMockSubResourcePatchFn(nil), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { rsp := &v1beta1.RunFunctionResponse{ Desired: &v1beta1.State{ Composite: &v1beta1.Resource{ @@ -563,10 +563,10 @@ func TestFunctionCompose(t *testing.T) { return rsp, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { // We only try to extract connection details for // observed resources. 
r := ComposedResourceStates{ @@ -578,7 +578,7 @@ func TestFunctionCompose(t *testing.T) { } return r, nil })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { + WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { return nil })), }, @@ -651,7 +651,7 @@ func TestFunctionCompose(t *testing.T) { }, r: func() FunctionRunner { var nrCalls int - return FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + return FunctionRunnerFn(func(_ context.Context, _ string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { defer func() { nrCalls++ }() requirements := &v1beta1.Requirements{ ExtraResources: map[string]*v1beta1.ResourceSelector{ @@ -738,10 +738,10 @@ func TestFunctionCompose(t *testing.T) { }) }(), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { // We only try to extract connection details for // observed resources. r := ComposedResourceStates{ @@ -753,10 +753,10 @@ func TestFunctionCompose(t *testing.T) { } return r, nil })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { + WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { return nil })), - WithExtraResourcesFetcher(ExtraResourcesFetcherFn(func(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { + WithExtraResourcesFetcher(ExtraResourcesFetcherFn(func(_ context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { if rs.GetMatchName() == "existing" { return &v1beta1.Resources{ Items: []*v1beta1.Resource{ @@ -1019,7 +1019,7 @@ func TestGetComposedResources(t *testing.T) { return nil }), }, - f: ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + f: ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, errBoom }), }, @@ -1049,7 +1049,7 @@ func TestGetComposedResources(t *testing.T) { return nil }), }, - f: ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + f: ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return details, nil }), }, diff --git a/internal/controller/apiextensions/composite/composition_pt_test.go b/internal/controller/apiextensions/composite/composition_pt_test.go index a8147db1a..464b4351f 100644 --- 
a/internal/controller/apiextensions/composite/composition_pt_test.go +++ b/internal/controller/apiextensions/composite/composition_pt_test.go @@ -94,7 +94,7 @@ func TestPTCompose(t *testing.T) { reason: "We should return any error encountered while associating Composition templates with composed resources.", params: params{ o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { return nil, errBoom })), }, @@ -112,7 +112,7 @@ func TestPTCompose(t *testing.T) { reason: "We should return any error encountered while parsing a composed resource base template", params: params{ o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{ { Template: v1.ComposedTemplate{ @@ -123,14 +123,14 @@ func TestPTCompose(t *testing.T) { } return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(cd resource.Composed, conn managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { + WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(_ resource.Composed, _ managed.ConnectionDetails, _ ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { return details, nil })), - WithComposedReadinessChecker(ReadinessCheckerFn(func(ctx context.Context, o ConditionedObject, rc ...ReadinessCheck) (ready bool, err error) { + WithComposedReadinessChecker(ReadinessCheckerFn(func(_ context.Context, _ ConditionedObject, _ ...ReadinessCheck) (ready bool, err error) { return true, nil })), }, @@ -152,7 +152,7 @@ func TestPTCompose(t *testing.T) { MockUpdate: test.NewMockUpdateFn(errBoom), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: v1.ComposedTemplate{ Name: ptr.To("cool-resource"), @@ -161,7 +161,7 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), }, }, args: 
args{ @@ -184,7 +184,7 @@ func TestPTCompose(t *testing.T) { MockCreate: test.NewMockCreateFn(errBoom), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: v1.ComposedTemplate{ Name: ptr.To("cool-resource"), @@ -193,7 +193,7 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), }, }, args: args{ @@ -216,7 +216,7 @@ func TestPTCompose(t *testing.T) { MockCreate: test.NewMockCreateFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: v1.ComposedTemplate{ Name: ptr.To("cool-resource"), @@ -225,8 +225,8 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, errBoom })), }, @@ -251,7 +251,7 @@ func TestPTCompose(t *testing.T) { MockCreate: test.NewMockCreateFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: v1.ComposedTemplate{ Name: ptr.To("cool-resource"), @@ -260,11 +260,11 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(cd resource.Composed, conn managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { + 
WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(_ resource.Composed, _ managed.ConnectionDetails, _ ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { return nil, errBoom })), }, @@ -289,7 +289,7 @@ func TestPTCompose(t *testing.T) { MockCreate: test.NewMockCreateFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: v1.ComposedTemplate{ Name: ptr.To("cool-resource"), @@ -298,14 +298,14 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, cd resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedReadinessChecker(ReadinessCheckerFn(func(ctx context.Context, o ConditionedObject, rc ...ReadinessCheck) (ready bool, err error) { + WithComposedReadinessChecker(ReadinessCheckerFn(func(_ context.Context, _ ConditionedObject, _ ...ReadinessCheck) (ready bool, err error) { return false, errBoom })), }, @@ -333,7 +333,7 @@ func TestPTCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { return nil, nil })), }, @@ -360,7 +360,7 @@ func TestPTCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: v1.ComposedTemplate{ Name: ptr.To("cool-resource"), @@ -369,14 +369,14 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error 
{ return nil })), + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(cd resource.Composed, conn managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { + WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(_ resource.Composed, _ managed.ConnectionDetails, _ ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { return details, nil })), - WithComposedReadinessChecker(ReadinessCheckerFn(func(ctx context.Context, o ConditionedObject, rc ...ReadinessCheck) (ready bool, err error) { + WithComposedReadinessChecker(ReadinessCheckerFn(func(_ context.Context, _ ConditionedObject, _ ...ReadinessCheck) (ready bool, err error) { return true, nil })), }, @@ -409,7 +409,7 @@ func TestPTCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{ { Template: v1.ComposedTemplate{ @@ -428,19 +428,19 @@ func TestPTCompose(t *testing.T) { } return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { if cd.GetObjectKind().GroupVersionKind().Kind == "BrokenResource" { return errBoom } return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(cd resource.Composed, conn managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { + WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(_ resource.Composed, _ managed.ConnectionDetails, _ ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { return details, nil })), - WithComposedReadinessChecker(ReadinessCheckerFn(func(ctx context.Context, o ConditionedObject, rc ...ReadinessCheck) (ready bool, err error) { + WithComposedReadinessChecker(ReadinessCheckerFn(func(_ context.Context, _ ConditionedObject, _ ...ReadinessCheck) (ready bool, err error) { return true, nil })), }, @@ -645,7 +645,7 @@ func TestGarbageCollectingAssociator(t *testing.T) { }, }, "ResourceControlledBySomeoneElse": { - reason: "We should not garbage collect a resource that is controlled by another resource.", + reason: "We should not garbage collect a resource that is controlled by another resource.", c: &test.MockClient{ MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { // The template used to create this resource is no longer known to us.
@@ -673,7 +673,7 @@ func TestGarbageCollectingAssociator(t *testing.T) { }, }, "ResourceNotControlled": { - reason: "We should not garbage collect a resource that has no controller reference.", + reason: "We should not garbage collect a resource that has no controller reference.", c: &test.MockClient{ MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { // The template used to create this resource is no longer known to us. diff --git a/internal/controller/apiextensions/composite/composition_render_test.go b/internal/controller/apiextensions/composite/composition_render_test.go index 59697114d..e4ecfb8a3 100644 --- a/internal/controller/apiextensions/composite/composition_render_test.go +++ b/internal/controller/apiextensions/composite/composition_render_test.go @@ -36,7 +36,7 @@ import ( ) func TestRenderFromJSON(t *testing.T) { - errInvalidChar := json.Unmarshal([]byte("olala"), &fake.Composed{}) + errInvalidChar := json.Unmarshal([]byte("olala"), &fake.Composed{}) //nolint:musttag // Not an issue in this test. type args struct { o resource.Object diff --git a/internal/controller/apiextensions/composite/connection_test.go b/internal/controller/apiextensions/composite/connection_test.go index 336120541..8aa540ec5 100644 --- a/internal/controller/apiextensions/composite/connection_test.go +++ b/internal/controller/apiextensions/composite/connection_test.go @@ -177,7 +177,7 @@ func TestConnectionDetailsFetcherChain(t *testing.T) { "SingleFetcherChain": { reason: "A chain of one fetcher should return only its connection details.", c: ConnectionDetailsFetcherChain{ - ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return managed.ConnectionDetails{"a": []byte("b")}, nil }), }, @@ -191,7 +191,7 @@ func TestConnectionDetailsFetcherChain(t *testing.T) { "FetcherError": { reason: "We should return errors from a chained fetcher.", c: ConnectionDetailsFetcherChain{ - ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, errBoom }), }, @@ -205,14 +205,14 @@ func TestConnectionDetailsFetcherChain(t *testing.T) { "MultipleFetcherChain": { reason: "A chain of multiple fetchers should return all of their connection details, with later fetchers winning if there are duplicates.", c: ConnectionDetailsFetcherChain{ - ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return managed.ConnectionDetails{ "a": []byte("a"), "b": []byte("b"), "c": []byte("c"), }, nil }), - ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return managed.ConnectionDetails{ "a": []byte("A"), }, nil diff --git a/internal/controller/apiextensions/composite/environment_fetcher_test.go b/internal/controller/apiextensions/composite/environment_fetcher_test.go index 05dc4864c..89215b6e7 100644 ---
a/internal/controller/apiextensions/composite/environment_fetcher_test.go +++ b/internal/controller/apiextensions/composite/environment_fetcher_test.go @@ -212,7 +212,7 @@ func TestFetch(t *testing.T) { reason: "It should merge the data of multiple EnvironmentConfigs in the order they are listed.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, o client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, o client.Object) error { cs := o.(*v1alpha1.EnvironmentConfig) switch key.Name { case "a": diff --git a/internal/controller/apiextensions/composite/environment_selector_test.go b/internal/controller/apiextensions/composite/environment_selector_test.go index f9ec23755..e4a32f17c 100644 --- a/internal/controller/apiextensions/composite/environment_selector_test.go +++ b/internal/controller/apiextensions/composite/environment_selector_test.go @@ -227,7 +227,7 @@ func TestSelect(t *testing.T) { reason: "It should create a name reference for the first selected EnvironmentConfig that matches the labels.", args: args{ kube: &test.MockClient{ - MockList: func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, obj client.ObjectList, opts ...client.ListOption) error { list := obj.(*v1alpha1.EnvironmentConfigList) match := opts[0].(client.MatchingLabels) if match["foo"] != "test-composite" { diff --git a/internal/controller/apiextensions/composite/fuzz_test.go b/internal/controller/apiextensions/composite/fuzz_test.go index b5a7270d3..0844d869d 100644 --- a/internal/controller/apiextensions/composite/fuzz_test.go +++ b/internal/controller/apiextensions/composite/fuzz_test.go @@ -66,7 +66,7 @@ func addType(p *v1.Patch, i int) { } func FuzzPatchApply(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { f := fuzz.NewConsumer(data) cp := &fake.Composite{} @@ -122,7 +122,7 @@ func addTransformType(t *v1.Transform, i int) error { } func FuzzTransform(f *testing.F) { - f.Fuzz(func(tt *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { f := fuzz.NewConsumer(data) t := &v1.Transform{} diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index 76f8b74ba..3e7dc87f7 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ b/internal/controller/apiextensions/composite/reconciler_test.go @@ -50,7 +50,7 @@ import ( v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" ) -var _ Composer = ComposerSelectorFn(func(cm *v1.CompositionMode) Composer { return nil }) +var _ Composer = ComposerSelectorFn(func(_ *v1.CompositionMode) Composer { return nil }) func TestReconcile(t *testing.T) { errBoom := errors.New("boom") @@ -117,7 +117,7 @@ func TestReconcile(t *testing.T) { }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithConnectionPublishers(managed.ConnectionPublisherFns{ - UnpublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) error { + UnpublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) error { return errBoom }, }), @@ -142,12 +142,12 @@ func TestReconcile(t *testing.T) { })), }), WithCompositeFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom 
}, }), WithConnectionPublishers(managed.ConnectionPublisherFns{ - UnpublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) error { + UnpublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) error { return nil }, }), @@ -172,12 +172,12 @@ func TestReconcile(t *testing.T) { })), }), WithCompositeFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), WithConnectionPublishers(managed.ConnectionPublisherFns{ - UnpublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) error { + UnpublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) error { return nil }, }), @@ -199,7 +199,7 @@ func TestReconcile(t *testing.T) { })), }), WithCompositeFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }, }), @@ -325,7 +325,7 @@ func TestReconcile(t *testing.T) { MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), }), WithCompositeFinalizer(resource.NewNopFinalizer()), - WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, _ resource.Composite) error { return nil })), WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { @@ -333,8 +333,8 @@ func TestReconcile(t *testing.T) { return c, nil })), WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), - WithConfigurator(ConfiguratorFn(func(ctx context.Context, cr resource.Composite, rev *v1.CompositionRevision) error { return nil })), - WithEnvironmentSelector(EnvironmentSelectorFn(func(ctx context.Context, cr resource.Composite, rev *v1.CompositionRevision) error { + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), + WithEnvironmentSelector(EnvironmentSelectorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return errBoom })), }, @@ -353,7 +353,7 @@ func TestReconcile(t *testing.T) { MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), }), WithCompositeFinalizer(resource.NewNopFinalizer()), - WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, _ resource.Composite) error { return nil })), WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { @@ -361,8 +361,8 @@ func TestReconcile(t *testing.T) { return c, nil })), WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), - WithConfigurator(ConfiguratorFn(func(ctx context.Context, cr resource.Composite, rev *v1.CompositionRevision) error { return nil })), - WithEnvironmentFetcher(EnvironmentFetcherFn(func(ctx context.Context, req EnvironmentFetcherRequest) (*Environment, error) { + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ 
*v1.CompositionRevision) error { return nil })), + WithEnvironmentFetcher(EnvironmentFetcherFn(func(_ context.Context, _ EnvironmentFetcherRequest) (*Environment, error) { return nil, errBoom })), }, @@ -395,7 +395,7 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{}, errBoom })), }, @@ -428,11 +428,11 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{}, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) (published bool, err error) { return false, errBoom }, }), @@ -449,9 +449,9 @@ func TestReconcile(t *testing.T) { opts: []ReconcilerOption{ WithClient(&test.MockClient{ MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + MockStatusUpdate: WantComposite(t, NewComposite(func(xr resource.Composite) { + xr.SetCompositionReference(&corev1.ObjectReference{}) + xr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) })), }), WithCompositeFinalizer(resource.NewNopFinalizer()), @@ -469,13 +469,13 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{ Events: []event.Event{event.Warning("Warning", errBoom)}, }, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) (published bool, err error) { return false, nil }, }), @@ -512,7 +512,7 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{ Composed: []ComposedResource{{ 
ResourceName: "elephant", @@ -536,7 +536,7 @@ func TestReconcile(t *testing.T) { }, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) (published bool, err error) { return false, nil }, }), @@ -574,11 +574,11 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{ConnectionDetails: cd}, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, got managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, got managed.ConnectionDetails) (published bool, err error) { want := cd if diff := cmp.Diff(want, got); diff != "" { t.Errorf("PublishConnection(...): -want, +got:\n%s", diff) @@ -661,11 +661,11 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{}, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, got managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) (published bool, err error) { return true, nil }, }), @@ -707,11 +707,11 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{}, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, got managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) (published bool, err error) { return true, nil }, }), @@ -759,8 +759,8 @@ func WithComposite(_ *testing.T, cr *composite.Unstructured) func(_ context.Cont } // A status update function that ensures the supplied object is the XR we want. 
-func WantComposite(t *testing.T, want resource.Composite) func(ctx context.Context, obj client.Object, _ ...client.SubResourceUpdateOption) error { - return func(ctx context.Context, got client.Object, _ ...client.SubResourceUpdateOption) error { +func WantComposite(t *testing.T, want resource.Composite) func(_ context.Context, obj client.Object, _ ...client.SubResourceUpdateOption) error { + return func(_ context.Context, got client.Object, _ ...client.SubResourceUpdateOption) error { // Normally we use a custom Equal method on conditions to ignore the // lastTransitionTime, but we may be using unstructured types here where // the conditions are just a map[string]any. @@ -839,7 +839,7 @@ func TestFilterToXRPatches(t *testing.T) { func TestEnqueueForCompositionRevisionFunc(t *testing.T) { type args struct { of schema.GroupVersionKind - list func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error + list func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error event runtimeevent.CreateEvent } type want struct { @@ -858,7 +858,7 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { name: "empty", args: args{ of: dog, - list: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + list: func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error { // test parameters only here, not in the later tests for brevity. u, ok := list.(*kunstructured.UnstructuredList) if !ok { @@ -877,7 +877,7 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { name: "automatic management policy", args: args{ of: dog, - list: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { var obj1 composite.Unstructured obj1.SetNamespace("ns") obj1.SetName("obj1") @@ -911,7 +911,7 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { name: "manual management policy", args: args{ of: dog, - list: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { var obj1 composite.Unstructured obj1.SetNamespace("ns") obj1.SetName("obj1") @@ -940,7 +940,7 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { name: "other composition", args: args{ of: dog, - list: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { var obj1 composite.Unstructured obj1.SetNamespace("ns") obj1.SetName("obj1") @@ -969,7 +969,7 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { name: "multiple", args: args{ of: dog, - list: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { var obj1 composite.Unstructured obj1.SetNamespace("ns") obj1.SetName("obj1") diff --git a/internal/controller/apiextensions/composition/fuzz_test.go b/internal/controller/apiextensions/composition/fuzz_test.go index 99bd21ae4..32e50ca44 100644 --- a/internal/controller/apiextensions/composition/fuzz_test.go +++ b/internal/controller/apiextensions/composition/fuzz_test.go @@ -25,7 +25,7 @@ import ( ) func FuzzNewCompositionRevision(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { f := fuzz.NewConsumer(data) c := 
&v1.Composition{} f.GenerateStruct(c) diff --git a/internal/controller/apiextensions/composition/reconciler_test.go b/internal/controller/apiextensions/composition/reconciler_test.go index 96bc63627..7529668fe 100644 --- a/internal/controller/apiextensions/composition/reconciler_test.go +++ b/internal/controller/apiextensions/composition/reconciler_test.go @@ -249,7 +249,7 @@ func TestReconcile(t *testing.T) { *obj.(*v1.Composition) = *compDev return nil }), - MockList: func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, obj client.ObjectList, opts ...client.ListOption) error { if len(opts) < 1 || opts[0].(client.MatchingLabels)[v1.LabelCompositionName] != compDev.Name { t.Errorf("unexpected list options: %v", opts) } @@ -287,7 +287,7 @@ func TestReconcile(t *testing.T) { *obj.(*v1.Composition) = *compDev return nil }), - MockList: func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, obj client.ObjectList, opts ...client.ListOption) error { if len(opts) < 1 || opts[0].(client.MatchingLabels)[v1.LabelCompositionName] != compDev.Name { t.Errorf("unexpected list options: %v", opts) } diff --git a/internal/controller/apiextensions/definition/reconciler_test.go b/internal/controller/apiextensions/definition/reconciler_test.go index a65f7f664..b78348cc7 100644 --- a/internal/controller/apiextensions/definition/reconciler_test.go +++ b/internal/controller/apiextensions/definition/reconciler_test.go @@ -534,7 +534,7 @@ func TestReconcile(t *testing.T) { mgr: &mockManager{ GetCacheFn: func() cache.Cache { return &mockCache{ - ListFn: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return nil }, + ListFn: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, } }, GetClientFn: func() client.Client { @@ -585,7 +585,7 @@ func TestReconcile(t *testing.T) { mgr: &mockManager{ GetCacheFn: func() cache.Cache { return &mockCache{ - ListFn: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return nil }, + ListFn: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, } }, GetClientFn: func() client.Client { @@ -627,16 +627,16 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(name string) error { return errBoom }, // This error should only be logged. + MockErr: func(_ string) error { return errBoom }, // This error should only be logged. 
MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { return mockNamedController{ - MockStart: func(ctx context.Context) error { return nil }, + MockStart: func(_ context.Context) error { return nil }, MockGetCache: func() cache.Cache { return &mockCache{ - IndexFieldFn: func(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + IndexFieldFn: func(_ context.Context, _ client.Object, _ string, _ client.IndexerFunc) error { return nil }, - WaitForCacheSyncFn: func(ctx context.Context) bool { + WaitForCacheSyncFn: func(_ context.Context) bool { return true }, } @@ -656,7 +656,7 @@ func TestReconcile(t *testing.T) { mgr: &mockManager{ GetCacheFn: func() cache.Cache { return &mockCache{ - ListFn: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return nil }, + ListFn: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, } }, GetClientFn: func() client.Client { @@ -711,16 +711,16 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(name string) error { return nil }, + MockErr: func(_ string) error { return nil }, MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { return mockNamedController{ - MockStart: func(ctx context.Context) error { return nil }, + MockStart: func(_ context.Context) error { return nil }, MockGetCache: func() cache.Cache { return &mockCache{ - IndexFieldFn: func(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + IndexFieldFn: func(_ context.Context, _ client.Object, _ string, _ client.IndexerFunc) error { return nil }, - WaitForCacheSyncFn: func(ctx context.Context) bool { + WaitForCacheSyncFn: func(_ context.Context) bool { return true }, } @@ -768,7 +768,7 @@ func TestReconcile(t *testing.T) { } type mockNamedController struct { - MockStart func(ctx context.Context) error + MockStart func(_ context.Context) error MockGetCache func() cache.Cache } @@ -823,9 +823,9 @@ func (m *mockManager) GetControllerOptions() ctrlconfig.Controller { type mockCache struct { cache.Cache - ListFn func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error - IndexFieldFn func(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error - WaitForCacheSyncFn func(ctx context.Context) bool + ListFn func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error + IndexFieldFn func(_ context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error + WaitForCacheSyncFn func(_ context.Context) bool } func (m *mockCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { diff --git a/internal/controller/apiextensions/offered/reconciler_test.go b/internal/controller/apiextensions/offered/reconciler_test.go index 82ecb0fdd..c47cfb63f 100644 --- a/internal/controller/apiextensions/offered/reconciler_test.go +++ b/internal/controller/apiextensions/offered/reconciler_test.go @@ -595,7 +595,7 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(name string) error { return errBoom }, // This error should only be logged. + MockErr: func(_ string) error { return errBoom }, // This error should only be logged. 
MockStart: func(_ string, _ kcontroller.Options, _ ...controller.Watch) error { return nil }}, ), }, @@ -654,7 +654,7 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(name string) error { return nil }, + MockErr: func(_ string) error { return nil }, MockStart: func(_ string, _ kcontroller.Options, _ ...controller.Watch) error { return nil }, MockStop: func(_ string) {}, }), diff --git a/internal/controller/apiextensions/usage/reconciler.go b/internal/controller/apiextensions/usage/reconciler.go index e66b2df54..3de23a37b 100644 --- a/internal/controller/apiextensions/usage/reconciler.go +++ b/internal/controller/apiextensions/usage/reconciler.go @@ -444,7 +444,7 @@ func detailsAnnotation(u *v1alpha1.Usage) string { // composite controller since otherwise we lose the owner reference this // controller puts on the Usage. func RespectOwnerRefs() xpresource.ApplyOption { - return func(ctx context.Context, current, desired runtime.Object) error { + return func(_ context.Context, current, desired runtime.Object) error { cu, ok := current.(*composed.Unstructured) if !ok || cu.GetObjectKind().GroupVersionKind() != v1alpha1.UsageGroupVersionKind { return nil diff --git a/internal/controller/apiextensions/usage/reconciler_test.go b/internal/controller/apiextensions/usage/reconciler_test.go index 3b10e3d1f..2b076325d 100644 --- a/internal/controller/apiextensions/usage/reconciler_test.go +++ b/internal/controller/apiextensions/usage/reconciler_test.go @@ -42,7 +42,7 @@ import ( ) type fakeSelectorResolver struct { - resourceSelectorFn func(ctx context.Context, u *v1alpha1.Usage) error + resourceSelectorFn func(_ context.Context, _ *v1alpha1.Usage) error } func (f fakeSelectorResolver) resolveSelectors(ctx context.Context, u *v1alpha1.Usage) error { @@ -118,7 +118,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return errBoom }, }), @@ -143,7 +143,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -172,7 +172,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -201,13 +201,13 @@ func TestReconcile(t *testing.T) { } return nil }), - MockUpdate: test.NewMockUpdateFn(nil, func(obj client.Object) error { + MockUpdate: test.NewMockUpdateFn(nil, func(_ client.Object) error { return nil }), }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -245,7 +245,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -283,7 +283,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + 
resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -332,7 +332,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -391,7 +391,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -440,7 +440,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -474,7 +474,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -508,7 +508,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -550,7 +550,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -582,13 +582,13 @@ func TestReconcile(t *testing.T) { } return errors.New("unexpected object type") }), - MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + MockList: test.NewMockListFn(nil, func(_ client.ObjectList) error { return errBoom }), }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -620,16 +620,16 @@ func TestReconcile(t *testing.T) { } return errors.New("unexpected object type") }), - MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + MockList: test.NewMockListFn(nil, func(_ client.ObjectList) error { return nil }), - MockUpdate: test.NewMockUpdateFn(nil, func(obj client.Object) error { + MockUpdate: test.NewMockUpdateFn(nil, func(_ client.Object) error { return errBoom }), }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -663,7 +663,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -695,7 +695,7 @@ func TestReconcile(t *testing.T) { } return errors.New("unexpected object type") }), - MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + MockList: test.NewMockListFn(nil, func(_ client.ObjectList) error { return nil }), MockUpdate: test.NewMockUpdateFn(nil, func(obj client.Object) error { @@ -710,7 +710,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - 
resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -753,7 +753,7 @@ func TestReconcile(t *testing.T) { } return errors.New("unexpected object type") }), - MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + MockList: test.NewMockListFn(nil, func(_ client.ObjectList) error { return nil }), MockUpdate: test.NewMockUpdateFn(nil, func(obj client.Object) error { @@ -768,7 +768,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), diff --git a/internal/controller/apiextensions/usage/selector_test.go b/internal/controller/apiextensions/usage/selector_test.go index 5087a827c..e6ce4bfb7 100644 --- a/internal/controller/apiextensions/usage/selector_test.go +++ b/internal/controller/apiextensions/usage/selector_test.go @@ -155,7 +155,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if we cannot list the used resources.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return errBoom }, }, @@ -181,7 +181,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if we cannot list the using resources.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return errBoom }, }, @@ -214,7 +214,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if we cannot update the usage after resolving used resource.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*composed.UnstructuredList) switch l.GetKind() { case "SomeKindList": @@ -234,7 +234,7 @@ func TestResolveSelectors(t *testing.T) { } return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return errBoom }, }, @@ -260,7 +260,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if we cannot update the usage after resolving using resource.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*composed.UnstructuredList) switch l.GetKind() { case "AnotherKindList": @@ -280,7 +280,7 @@ func TestResolveSelectors(t *testing.T) { } return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return errBoom }, }, @@ -313,7 +313,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if there are no matching resources.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts 
...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, }, @@ -340,7 +340,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if there are no matching resources with controller ref.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*composed.UnstructuredList) switch l.GetKind() { case "SomeKindList": @@ -360,7 +360,7 @@ func TestResolveSelectors(t *testing.T) { } return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return nil }, }, @@ -387,7 +387,7 @@ func TestResolveSelectors(t *testing.T) { reason: "If selectors defined for both \"of\" and \"by\", both should be resolved.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error { l := list.(*composed.UnstructuredList) if v := l.GroupVersionKind().Version; v != "v1" { t.Errorf("unexpected list version: %s", v) @@ -437,7 +437,7 @@ func TestResolveSelectors(t *testing.T) { } return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return nil }, }, diff --git a/internal/controller/pkg/manager/reconciler_test.go b/internal/controller/pkg/manager/reconciler_test.go index 6f5ce175f..b9da46017 100644 --- a/internal/controller/pkg/manager/reconciler_test.go +++ b/internal/controller/pkg/manager/reconciler_test.go @@ -489,7 +489,7 @@ func TestReconcile(t *testing.T) { return nil }), }, - Applicator: resource.ApplyFn(func(_ context.Context, o client.Object, _ ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { return errBoom }), }, diff --git a/internal/controller/pkg/resolver/reconciler_test.go b/internal/controller/pkg/resolver/reconciler_test.go index bce8eb97c..b8cfa7efd 100644 --- a/internal/controller/pkg/resolver/reconciler_test.go +++ b/internal/controller/pkg/resolver/reconciler_test.go @@ -165,7 +165,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, errBoom }, } @@ -201,7 +201,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, MockSort: func() ([]string, error) { @@ -240,7 +240,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, MockSort: func() ([]string, error) { @@ -279,7 +279,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) 
([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return []dag.Node{ &v1beta1.Dependency{ Package: "not.a.valid.package", @@ -322,7 +322,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return []dag.Node{ &v1beta1.Dependency{ Package: "hasheddan/config-nop-b", @@ -369,7 +369,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return []dag.Node{ &v1beta1.Dependency{ Package: "hasheddan/config-nop-b", @@ -417,7 +417,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return []dag.Node{ &v1beta1.Dependency{ Package: "hasheddan/config-nop-c", @@ -466,7 +466,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return []dag.Node{ &v1beta1.Dependency{ Package: "hasheddan/config-nop-c", diff --git a/internal/controller/pkg/revision/dependency_test.go b/internal/controller/pkg/revision/dependency_test.go index 3bfc3d922..d0dc45daf 100644 --- a/internal/controller/pkg/revision/dependency_test.go +++ b/internal/controller/pkg/revision/dependency_test.go @@ -173,7 +173,7 @@ func TestResolve(t *testing.T) { }, newDag: func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, MockTraceNode: func(_ string) (map[string]dag.Node, error) { @@ -200,14 +200,14 @@ func TestResolve(t *testing.T) { args: args{ dep: &PackageDependencyManager{ client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + MockGet: test.NewMockGetFn(nil, func(_ client.Object) error { return nil }), MockUpdate: test.NewMockUpdateFn(nil), }, newDag: func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, MockNodeExists: func(_ string) bool { @@ -287,7 +287,7 @@ func TestResolve(t *testing.T) { }, newDag: func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return []dag.Node{ &v1beta1.Dependency{ Package: "not-here-2", @@ -375,7 +375,7 @@ func TestResolve(t *testing.T) { }, newDag: func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, MockTraceNode: func(_ string) (map[string]dag.Node, error) { @@ -478,10 +478,10 @@ func TestResolve(t *testing.T) { }, newDag: func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, - MockNodeExists: func(identifier string) bool { + MockNodeExists: func(_ string) bool { return true }, MockTraceNode: func(_ string) (map[string]dag.Node, error) { diff --git 
a/internal/controller/pkg/revision/establisher_test.go b/internal/controller/pkg/revision/establisher_test.go index ee12d8c10..e33485845 100644 --- a/internal/controller/pkg/revision/establisher_test.go +++ b/internal/controller/pkg/revision/establisher_test.go @@ -141,7 +141,7 @@ func TestAPIEstablisherEstablish(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { if s, ok := obj.(*corev1.Secret); ok { (&corev1.Secret{ Data: map[string][]byte{ @@ -353,7 +353,7 @@ func TestAPIEstablisherEstablish(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { s := &corev1.Secret{} s.DeepCopyInto(obj.(*corev1.Secret)) return nil @@ -378,7 +378,7 @@ func TestAPIEstablisherEstablish(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { s := &corev1.Secret{} s.DeepCopyInto(obj.(*corev1.Secret)) return nil @@ -495,7 +495,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return errBoom }, }, @@ -524,7 +524,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return kerrors.NewNotFound(schema.GroupResource{}, "") }, }, @@ -553,7 +553,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { o := obj.(*unstructured.Unstructured) o.SetOwnerReferences([]metav1.OwnerReference{ { @@ -573,7 +573,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { }) return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return errBoom }, }, @@ -602,10 +602,10 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return nil }, }, @@ -625,7 +625,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) 
error { o := obj.(*unstructured.Unstructured) o.SetOwnerReferences([]metav1.OwnerReference{ { @@ -645,7 +645,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { }) return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { t.Errorf("should not have called update") return nil }, @@ -675,7 +675,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { o := obj.(*unstructured.Unstructured) o.SetOwnerReferences([]metav1.OwnerReference{ { @@ -688,7 +688,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { }) return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { o := obj.(*unstructured.Unstructured) if len(o.GetOwnerReferences()) != 2 { t.Errorf("expected 2 owner references, got %d", len(o.GetOwnerReferences())) @@ -736,7 +736,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { o := obj.(*unstructured.Unstructured) o.SetOwnerReferences([]metav1.OwnerReference{ { @@ -756,7 +756,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { }) return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { o := obj.(*unstructured.Unstructured) if len(o.GetOwnerReferences()) != 2 { t.Errorf("expected 2 owner references, got %d", len(o.GetOwnerReferences())) diff --git a/internal/controller/pkg/revision/fuzz_test.go b/internal/controller/pkg/revision/fuzz_test.go index f605419ea..1e83e209e 100644 --- a/internal/controller/pkg/revision/fuzz_test.go +++ b/internal/controller/pkg/revision/fuzz_test.go @@ -69,16 +69,16 @@ func newFuzzDag(ff *fuzz.ConsumeFuzzer) (func() dag.DAG, error) { } return func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, - MockNodeExists: func(identifier string) bool { + MockNodeExists: func(_ string) bool { return true }, MockTraceNode: func(_ string) (map[string]dag.Node, error) { return traceNodeMap, nil }, - MockGetNode: func(s string) (dag.Node, error) { + MockGetNode: func(_ string) (dag.Node, error) { return lp, nil }, } @@ -102,7 +102,7 @@ func getFuzzMockClient(ff *fuzz.ConsumeFuzzer) (*test.MockClient, error) { } func FuzzRevisionControllerPackageHandling(f *testing.F) { - f.Fuzz(func(t *testing.T, data, revisionData []byte) { + f.Fuzz(func(_ *testing.T, data, revisionData []byte) { ff := fuzz.NewConsumer(revisionData) p := parser.New(metaScheme, objScheme) r := io.NopCloser(bytes.NewReader(data)) diff --git a/internal/controller/pkg/revision/reconciler_test.go b/internal/controller/pkg/revision/reconciler_test.go index 2350edc5f..ea7e2636e 100644 --- a/internal/controller/pkg/revision/reconciler_test.go +++ b/internal/controller/pkg/revision/reconciler_test.go @@ -274,7 
+274,7 @@ func TestReconcile(t *testing.T) { pr.SetDeletionTimestamp(&now) return nil }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(_ client.Object) error { t.Errorf("StatusUpdate should not be called") return nil }), @@ -380,7 +380,7 @@ func TestReconcile(t *testing.T) { pr.SetDesiredState(v1.PackageRevisionActive) return nil }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(_ client.Object) error { t.Errorf("StatusUpdate should not be called") return nil }), @@ -415,7 +415,7 @@ func TestReconcile(t *testing.T) { pr.SetDesiredState(v1.PackageRevisionActive) return nil }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(_ client.Object) error { t.Errorf("StatusUpdate should not be called") return nil }), @@ -732,7 +732,7 @@ func TestReconcile(t *testing.T) { WithLinter(&MockLinter{MockLint: NewMockLintFn(errBoom)}), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -789,7 +789,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -886,7 +886,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -950,7 +950,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1016,7 +1016,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1084,7 +1084,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1146,7 +1146,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1212,7 +1212,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), 
WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1275,7 +1275,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1407,7 +1407,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1525,7 +1525,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1589,7 +1589,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, diff --git a/internal/controller/pkg/revision/runtime_function_test.go b/internal/controller/pkg/revision/runtime_function_test.go index c4ac2192f..87cd9e39e 100644 --- a/internal/controller/pkg/revision/runtime_function_test.go +++ b/internal/controller/pkg/revision/runtime_function_test.go @@ -74,7 +74,7 @@ func TestFunctionPreHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceFn: func(overrides ...ServiceOverride) *corev1.Service { + ServiceFn: func(_ ...ServiceOverride) *corev1.Service { return &corev1.Service{} }, TLSServerSecretFn: func() *corev1.Secret { @@ -82,17 +82,17 @@ func TestFunctionPreHook(t *testing.T) { }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { if svc, ok := obj.(*corev1.Service); ok { svc.Name = "some-service" svc.Namespace = "some-namespace" } return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return nil }, }, @@ -189,18 +189,18 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx 
context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return errBoom }, }, @@ -230,18 +230,18 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if _, ok := obj.(*appsv1.Deployment); ok { return errBoom } @@ -274,18 +274,18 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return nil }, }, @@ -315,18 +315,18 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -364,18 +364,18 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides 
...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -411,15 +411,15 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { if sa, ok := obj.(*corev1.ServiceAccount); ok { if sa.GetName() == xpManagedSA { return kerrors.NewNotFound(corev1.Resource("serviceaccount"), xpManagedSA) @@ -427,7 +427,7 @@ func TestFunctionPostHook(t *testing.T) { } return nil }, - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, obj client.Object, _ ...client.CreateOption) error { if sa, ok := obj.(*corev1.ServiceAccount); ok { if sa.GetName() == xpManagedSA { t.Error("unexpected call to create SA when SA is managed externally") @@ -435,7 +435,7 @@ func TestFunctionPostHook(t *testing.T) { } return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -501,15 +501,15 @@ func TestFunctionDeactivateHook(t *testing.T) { reason: "Should return error if we fail to delete deployment.", args: args{ manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockDelete: func(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + MockDelete: func(_ context.Context, obj client.Object, _ ...client.DeleteOption) error { if _, ok := obj.(*appsv1.Deployment); ok { return errBoom } @@ -525,14 +525,14 @@ func TestFunctionDeactivateHook(t 
*testing.T) { reason: "Should not return error if successfully deleted service account and deployment.", args: args{ manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "some-sa", }, } }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "some-deployment", @@ -552,7 +552,7 @@ func TestFunctionDeactivateHook(t *testing.T) { }, }, client: &test.MockClient{ - MockDelete: func(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + MockDelete: func(_ context.Context, obj client.Object, _ ...client.DeleteOption) error { switch obj.(type) { case *corev1.ServiceAccount: return errors.New("service account should not be deleted during deactivation") diff --git a/internal/controller/pkg/revision/runtime_provider_test.go b/internal/controller/pkg/revision/runtime_provider_test.go index 58c768c68..12203607c 100644 --- a/internal/controller/pkg/revision/runtime_provider_test.go +++ b/internal/controller/pkg/revision/runtime_provider_test.go @@ -149,7 +149,7 @@ func TestProviderPreHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceFn: func(overrides ...ServiceOverride) *corev1.Service { + ServiceFn: func(_ ...ServiceOverride) *corev1.Service { return &corev1.Service{} }, TLSClientSecretFn: func() *corev1.Secret { @@ -160,13 +160,13 @@ func TestProviderPreHook(t *testing.T) { }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return nil }, }, @@ -261,18 +261,18 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return errBoom }, }, @@ -302,18 +302,18 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ 
...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if _, ok := obj.(*appsv1.Deployment); ok { return errBoom } @@ -346,18 +346,18 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return nil }, }, @@ -387,18 +387,18 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -436,18 +436,18 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx 
context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -483,14 +483,14 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "xp-managed-sa", }, } }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{ Spec: appsv1.DeploymentSpec{ Template: corev1.PodTemplateSpec{ @@ -503,7 +503,7 @@ func TestProviderPostHook(t *testing.T) { }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { if sa, ok := obj.(*corev1.ServiceAccount); ok { if sa.GetName() == "xp-managed-sa" { return kerrors.NewNotFound(corev1.Resource("serviceaccount"), "xp-managed-sa") @@ -511,7 +511,7 @@ func TestProviderPostHook(t *testing.T) { } return nil }, - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, obj client.Object, _ ...client.CreateOption) error { if sa, ok := obj.(*corev1.ServiceAccount); ok { if sa.GetName() == "xp-managed-sa" { t.Error("unexpected call to create SA when SA is managed externally") @@ -519,7 +519,7 @@ func TestProviderPostHook(t *testing.T) { } return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -585,15 +585,15 @@ func TestProviderDeactivateHook(t *testing.T) { reason: "Should return error if we fail to delete deployment.", args: args{ manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockDelete: func(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + MockDelete: func(_ context.Context, obj client.Object, _ ...client.DeleteOption) error { if _, ok := obj.(*appsv1.Deployment); ok { return errBoom } @@ -614,14 +614,14 @@ func TestProviderDeactivateHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "some-sa", }, } }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + 
DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "some-deployment", @@ -641,7 +641,7 @@ func TestProviderDeactivateHook(t *testing.T) { }, }, client: &test.MockClient{ - MockDelete: func(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + MockDelete: func(_ context.Context, obj client.Object, _ ...client.DeleteOption) error { switch obj.(type) { case *corev1.ServiceAccount: return errors.New("service account should not be deleted during deactivation") diff --git a/internal/controller/rbac/definition/reconciler_test.go b/internal/controller/rbac/definition/reconciler_test.go index fe07f4761..193621591 100644 --- a/internal/controller/rbac/definition/reconciler_test.go +++ b/internal/controller/rbac/definition/reconciler_test.go @@ -143,7 +143,7 @@ func TestReconcile(t *testing.T) { Client: &test.MockClient{ MockGet: test.NewMockGetFn(nil), }, - Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, ao ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, _ ...resource.ApplyOption) error { // Simulate a no-op change by not allowing the update. return resource.AllowUpdateIf(func(_, _ runtime.Object) bool { return false })(ctx, o, o) }), diff --git a/internal/controller/rbac/namespace/fuzz_test.go b/internal/controller/rbac/namespace/fuzz_test.go index db7f399cb..bc1dbd1d7 100644 --- a/internal/controller/rbac/namespace/fuzz_test.go +++ b/internal/controller/rbac/namespace/fuzz_test.go @@ -25,7 +25,7 @@ import ( ) func FuzzRenderRoles(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { ff := fuzz.NewConsumer(data) ns := &corev1.Namespace{} ff.GenerateStruct(ns) diff --git a/internal/controller/rbac/namespace/reconciler_test.go b/internal/controller/rbac/namespace/reconciler_test.go index fd492dd75..02c932c79 100644 --- a/internal/controller/rbac/namespace/reconciler_test.go +++ b/internal/controller/rbac/namespace/reconciler_test.go @@ -161,7 +161,7 @@ func TestReconcile(t *testing.T) { MockGet: test.NewMockGetFn(nil), MockList: test.NewMockListFn(nil), }, - Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, ao ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, _ ...resource.ApplyOption) error { // Simulate a no-op change by not allowing the update. 
return resource.AllowUpdateIf(func(_, _ runtime.Object) bool { return false })(ctx, o, o) }), diff --git a/internal/controller/rbac/provider/roles/fuzz_test.go b/internal/controller/rbac/provider/roles/fuzz_test.go index f09ec0234..337046074 100644 --- a/internal/controller/rbac/provider/roles/fuzz_test.go +++ b/internal/controller/rbac/provider/roles/fuzz_test.go @@ -25,7 +25,7 @@ import ( ) func FuzzRenderClusterRoles(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { ff := fuzz.NewConsumer(data) pr := &v1.ProviderRevision{} ff.GenerateStruct(pr) diff --git a/internal/controller/rbac/provider/roles/reconciler_test.go b/internal/controller/rbac/provider/roles/reconciler_test.go index 80c530e26..2bcac3235 100644 --- a/internal/controller/rbac/provider/roles/reconciler_test.go +++ b/internal/controller/rbac/provider/roles/reconciler_test.go @@ -152,7 +152,7 @@ func TestReconcile(t *testing.T) { MockList: test.NewMockListFn(nil), }, }), - WithPermissionRequestsValidator(PermissionRequestsValidatorFn(func(ctx context.Context, requested ...rbacv1.PolicyRule) ([]Rule, error) { + WithPermissionRequestsValidator(PermissionRequestsValidatorFn(func(_ context.Context, _ ...rbacv1.PolicyRule) ([]Rule, error) { return nil, errBoom })), }, @@ -172,7 +172,7 @@ func TestReconcile(t *testing.T) { MockList: test.NewMockListFn(nil), }, }), - WithPermissionRequestsValidator(PermissionRequestsValidatorFn(func(ctx context.Context, requested ...rbacv1.PolicyRule) ([]Rule, error) { + WithPermissionRequestsValidator(PermissionRequestsValidatorFn(func(_ context.Context, _ ...rbacv1.PolicyRule) ([]Rule, error) { return []Rule{{}}, nil })), }, @@ -214,7 +214,7 @@ func TestReconcile(t *testing.T) { MockGet: test.NewMockGetFn(nil), MockList: test.NewMockListFn(nil), }, - Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, ao ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, _ ...resource.ApplyOption) error { // Simulate a no-op change by not allowing the update. 
return resource.AllowUpdateIf(func(_, _ runtime.Object) bool { return false })(ctx, o, o) }), diff --git a/internal/dag/fuzz_test.go b/internal/dag/fuzz_test.go index 5d7700ae8..0d492c5a3 100644 --- a/internal/dag/fuzz_test.go +++ b/internal/dag/fuzz_test.go @@ -67,7 +67,7 @@ func (s *SimpleFuzzNode) Neighbors() []Node { } func FuzzDag(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { c := fuzz.NewConsumer(data) nodes := make([]SimpleFuzzNode, 0) err := c.CreateSlice(&nodes) diff --git a/internal/initializer/deployment_runtime_config_test.go b/internal/initializer/deployment_runtime_config_test.go index 2142fb03b..1402d750c 100644 --- a/internal/initializer/deployment_runtime_config_test.go +++ b/internal/initializer/deployment_runtime_config_test.go @@ -43,7 +43,7 @@ func TestDeploymentRuntimeConfigObject(t *testing.T) { "FailedToCreate": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return errBoom }, }, @@ -55,7 +55,7 @@ func TestDeploymentRuntimeConfigObject(t *testing.T) { "SuccessCreated": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return nil }, }, @@ -64,7 +64,7 @@ func TestDeploymentRuntimeConfigObject(t *testing.T) { "SuccessAlreadyExists": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return kerrors.NewAlreadyExists(schema.GroupResource{}, "default") }, }, diff --git a/internal/initializer/installer_test.go b/internal/initializer/installer_test.go index 6a4ffd62b..0c7f246cf 100644 --- a/internal/initializer/installer_test.go +++ b/internal/initializer/installer_test.go @@ -226,7 +226,7 @@ func TestInstaller(t *testing.T) { p: []string{p1}, c: []string{c1}, kube: &test.MockClient{ - MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { @@ -244,7 +244,7 @@ func TestInstaller(t *testing.T) { } return kerrors.NewNotFound(schema.GroupResource{}, key.Name) }, - MockCreate: func(_ context.Context, obj client.Object, _ ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return nil }, }, @@ -304,7 +304,7 @@ func TestInstaller(t *testing.T) { } return kerrors.NewNotFound(schema.GroupResource{}, key.Name) }, - MockCreate: func(_ context.Context, obj client.Object, _ ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return nil }, }, @@ -316,7 +316,7 @@ func TestInstaller(t *testing.T) { args: args{ c: []string{c1}, kube: &test.MockClient{ - MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { @@ -332,7 +332,7 @@ func TestInstaller(t *testing.T) { 
} return kerrors.NewNotFound(schema.GroupResource{}, key.Name) }, - MockCreate: func(_ context.Context, obj client.Object, _ ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return nil }, }, @@ -343,10 +343,10 @@ func TestInstaller(t *testing.T) { p: []string{p1}, c: []string{c1}, kube: &test.MockClient{ - MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, - MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return errBoom }, }, diff --git a/internal/initializer/lock_test.go b/internal/initializer/lock_test.go index 7c36f775c..f34823b73 100644 --- a/internal/initializer/lock_test.go +++ b/internal/initializer/lock_test.go @@ -53,7 +53,7 @@ func TestLockObject(t *testing.T) { "FailApply": { args: args{ kube: &test.MockClient{ - MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return errBoom }, }, diff --git a/internal/initializer/store_config_test.go b/internal/initializer/store_config_test.go index 19ddcd583..5afe5335f 100644 --- a/internal/initializer/store_config_test.go +++ b/internal/initializer/store_config_test.go @@ -43,7 +43,7 @@ func TestStoreConfigObject(t *testing.T) { "FailedToCreate": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return errBoom }, }, @@ -55,7 +55,7 @@ func TestStoreConfigObject(t *testing.T) { "SuccessCreated": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return nil }, }, @@ -64,7 +64,7 @@ func TestStoreConfigObject(t *testing.T) { "SuccessAlreadyExists": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return kerrors.NewAlreadyExists(schema.GroupResource{}, "default") }, }, diff --git a/internal/initializer/tls_test.go b/internal/initializer/tls_test.go index 7fd3621e3..011524a50 100644 --- a/internal/initializer/tls_test.go +++ b/internal/initializer/tls_test.go @@ -108,7 +108,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the CA secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, _ client.Object) error { if key.Name != caCertSecretName || key.Namespace != secretNS { return errors.New("unexpected secret name or namespace") } @@ -129,7 +129,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the CA secret cannot be updated.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name != caCertSecretName || 
key.Namespace != secretNS { return errors.New("unexpected secret name or namespace") } @@ -161,7 +161,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return no error after loading the CA from the Secret.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name != caCertSecretName { return nil } @@ -177,7 +177,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { MockUpdate: test.NewMockUpdateFn(nil), }, certificate: &MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return []byte("test-key"), []byte("test-cert"), nil }, }, @@ -191,7 +191,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the CA secret cannot be parsed.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name != caCertSecretName || key.Namespace != secretNS { return errors.New("unexpected secret name or namespace") } @@ -228,7 +228,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the server secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -260,7 +260,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the client secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -301,7 +301,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should be successful if the CA and TLS certificates are generated and put into the Secret.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -337,7 +337,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { return errors.New("unexpected secret name or namespace") }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { if obj.GetName() == tlsServerSecretName && obj.GetNamespace() == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -366,7 +366,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { }, }, certificate: &MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return []byte(caKey), 
[]byte(caCert), nil }, }, @@ -380,7 +380,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should be successful if the CA and TLS certificates are already in the Secret.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -428,7 +428,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { MockGet: test.NewMockGetFn(nil), }, certificate: &MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return nil, nil, errBoom }, }, @@ -445,7 +445,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the CA and TLS certificates cannot be generated.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -460,7 +460,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { }, }, certificate: &MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return nil, nil, errBoom }, }, @@ -479,7 +479,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the CA secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, _ client.Object) error { if key.Name != caCertSecretName || key.Namespace != secretNS { return errors.New("unexpected secret name or namespace") } @@ -499,7 +499,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the server secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -530,7 +530,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should be successful if the server certificates are already in the Secret.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -567,7 +567,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -592,7 +592,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { 
return errors.New("unexpected secret name or namespace") }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { if obj.GetName() == tlsServerSecretName && obj.GetNamespace() == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -609,7 +609,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { }, certificate: &MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return []byte(caKey), []byte(caCert), nil }, }, @@ -623,7 +623,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the CA secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, _ client.Object) error { if key.Name != caCertSecretName || key.Namespace != secretNS { return errors.New("unexpected secret name or namespace") } @@ -643,7 +643,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the client secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -674,7 +674,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should be successful if the client certificates are already in the Secret.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -711,7 +711,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -736,7 +736,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { return errors.New("unexpected secret name or namespace") }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { if obj.GetName() == tlsClientSecretName && obj.GetNamespace() == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -753,7 +753,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { }, certificate: &MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return []byte(caKey), []byte(caCert), nil }, }, diff --git a/internal/initializer/waiter_test.go b/internal/initializer/waiter_test.go index c4d2d215e..b79fb2ab6 100644 --- a/internal/initializer/waiter_test.go +++ b/internal/initializer/waiter_test.go @@ -51,7 +51,7 @@ func TestCRDWaiter(t 
*testing.T) { period: 1 * time.Second, timeout: 2 * time.Second, kube: &test.MockClient{ - MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, }, @@ -63,7 +63,7 @@ func TestCRDWaiter(t *testing.T) { timeout: 2 * time.Millisecond, period: 1 * time.Millisecond, kube: &test.MockClient{ - MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, _ client.Object) error { return kerrors.NewNotFound(schema.GroupResource{}, key.Name) }, }, @@ -78,7 +78,7 @@ func TestCRDWaiter(t *testing.T) { period: 1 * time.Millisecond, timeout: 1 * time.Second, kube: &test.MockClient{ - MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return errBoom }, }, diff --git a/internal/names/generate_test.go b/internal/names/generate_test.go index 728ac3e7d..a0bc585cf 100644 --- a/internal/names/generate_test.go +++ b/internal/names/generate_test.go @@ -110,7 +110,7 @@ func TestGenerateName(t *testing.T) { }, "SuccessAfterConflict": { reason: "Name is found on second try", - client: &test.MockClient{MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + client: &test.MockClient{MockGet: func(_ context.Context, key client.ObjectKey, _ client.Object) error { if key.Name == "cool-resource-42" { return nil } diff --git a/internal/usage/handler_test.go b/internal/usage/handler_test.go index 0b41ba8e1..7340cfd23 100644 --- a/internal/usage/handler_test.go +++ b/internal/usage/handler_test.go @@ -122,7 +122,7 @@ func TestHandle(t *testing.T) { reason: "We should allow a delete request if there is no usages for the given object.", args: args{ reader: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, }, @@ -148,7 +148,7 @@ func TestHandle(t *testing.T) { reason: "We should reject a delete request if we cannot list usages.", args: args{ reader: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return errBoom }, }, @@ -174,7 +174,7 @@ func TestHandle(t *testing.T) { reason: "We should reject a delete request if there are usages for the given object with \"by\" defined.", args: args{ reader: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*v1alpha1.UsageList) l.Items = []v1alpha1.Usage{ { @@ -232,7 +232,7 @@ func TestHandle(t *testing.T) { reason: "We should reject a delete request if there are usages for the given object with \"reason\" defined.", args: args{ reader: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*v1alpha1.UsageList) l.Items = []v1alpha1.Usage{ { @@ -284,7 +284,7 @@ func TestHandle(t *testing.T) { reason: "We should reject a delete request if there are usages for the given object without \"reason\" or \"by\" defined.", 
args: args{ reader: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*v1alpha1.UsageList) l.Items = []v1alpha1.Usage{ { diff --git a/internal/xcrd/fuzz_test.go b/internal/xcrd/fuzz_test.go index 00918b668..40e9e6c54 100644 --- a/internal/xcrd/fuzz_test.go +++ b/internal/xcrd/fuzz_test.go @@ -25,7 +25,7 @@ import ( ) func FuzzForCompositeResourceXcrd(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { ff := fuzz.NewConsumer(data) xrd := &v1.CompositeResourceDefinition{} err := ff.GenerateStruct(xrd) @@ -37,7 +37,7 @@ func FuzzForCompositeResourceXcrd(f *testing.F) { } func FuzzForCompositeResourceClaim(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { ff := fuzz.NewConsumer(data) xrd := &v1.CompositeResourceDefinition{} err := ff.GenerateStruct(xrd) diff --git a/internal/xpkg/build.go b/internal/xpkg/build.go index 0d2fed166..d68a8f2dc 100644 --- a/internal/xpkg/build.go +++ b/internal/xpkg/build.go @@ -252,7 +252,7 @@ func encode(pkg parser.Lintable) (*bytes.Buffer, error) { // SkipContains supplies a FilterFn that skips paths that contain the give pattern. func SkipContains(pattern string) parser.FilterFn { - return func(path string, info os.FileInfo) (bool, error) { + return func(path string, _ os.FileInfo) (bool, error) { return strings.Contains(path, pattern), nil } } diff --git a/internal/xpkg/fake/mocks.go b/internal/xpkg/fake/mocks.go index cc041d132..78b072f78 100644 --- a/internal/xpkg/fake/mocks.go +++ b/internal/xpkg/fake/mocks.go @@ -49,7 +49,7 @@ func NewMockCacheGetFn(rc io.ReadCloser, err error) func() (io.ReadCloser, error // NewMockCacheStoreFn creates a new MockStore function for MockCache. func NewMockCacheStoreFn(err error) func(s string, rc io.ReadCloser) error { - return func(s string, rc io.ReadCloser) error { return err } + return func(_ string, _ io.ReadCloser) error { return err } } // NewMockCacheDeleteFn creates a new MockDelete function for MockCache. 
diff --git a/internal/xpkg/upbound/resolver.go b/internal/xpkg/upbound/resolver.go index b029d1141..c4179bbb3 100644 --- a/internal/xpkg/upbound/resolver.go +++ b/internal/xpkg/upbound/resolver.go @@ -39,7 +39,7 @@ func JSON(base, overlay io.Reader) (kong.Resolver, error) { return nil, err } - var f kong.ResolverFunc = func(context *kong.Context, parent *kong.Path, flag *kong.Flag) (interface{}, error) { + var f kong.ResolverFunc = func(_ *kong.Context, _ *kong.Path, flag *kong.Flag) (interface{}, error) { name := strings.ReplaceAll(flag.Name, "-", "_") bRaw, bOk := resolveValue(name, flag.Envs, baseValues) oRaw, oOk := resolveValue(name, flag.Envs, overlayValues) diff --git a/test/e2e/funcs/env.go b/test/e2e/funcs/env.go index 2f834ef8e..e7aef5b51 100644 --- a/test/e2e/funcs/env.go +++ b/test/e2e/funcs/env.go @@ -119,7 +119,7 @@ func EnvFuncs(fns ...env.Func) env.Func { func CreateKindClusterWithConfig(clusterName, configFilePath string) env.Func { return EnvFuncs( envfuncs.CreateClusterWithConfig(kind.NewProvider(), clusterName, configFilePath), - func(ctx context.Context, config *envconf.Config) (context.Context, error) { + func(ctx context.Context, _ *envconf.Config) (context.Context, error) { b, err := os.ReadFile(filepath.Clean(configFilePath)) if err != nil { return ctx, err diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go index f05a4b456..eceda36c1 100644 --- a/test/e2e/funcs/feature.go +++ b/test/e2e/funcs/feature.go @@ -157,7 +157,7 @@ func ResourceCreatedWithin(d time.Duration, o k8s.Object) features.Func { t.Logf("Waiting %s for %s to be created...", d, identifier(o)) start := time.Now() - if err := wait.For(conditions.New(c.Client().Resources()).ResourceMatch(o, func(object k8s.Object) bool { return true }), wait.WithTimeout(d), wait.WithInterval(DefaultPollInterval)); err != nil { + if err := wait.For(conditions.New(c.Client().Resources()).ResourceMatch(o, func(_ k8s.Object) bool { return true }), wait.WithTimeout(d), wait.WithInterval(DefaultPollInterval)); err != nil { t.Errorf("resource %s did not exist: %v", identifier(o), err) return ctx } From e401acc5bf74e6bf84083796849c7dcb70afaca5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fatih=20T=C3=BCrken?= Date: Tue, 13 Feb 2024 16:44:42 +0300 Subject: [PATCH 006/370] Update ignore-changes docs managementPolicy=>ManagementPolicies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Fatih Türken --- design/one-pager-ignore-changes.md | 64 +++++++++++++++--------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/design/one-pager-ignore-changes.md b/design/one-pager-ignore-changes.md index d0eccc0ca..c5c59f45a 100644 --- a/design/one-pager-ignore-changes.md +++ b/design/one-pager-ignore-changes.md @@ -58,7 +58,7 @@ for ignoring some managed resource parameters during updates. ## Proposal -Proposed solution is to rework the new [managementPolicy] feature which came +Proposed solution is to rework the new [managementPolicies] feature which came with the [ObserveOnly] feature and transform it into a set of enum values representing what Crossplane should do with the managed resource. @@ -77,44 +77,44 @@ This will allow users to fine-tune how Crossplane manages the external resource, in a manner which is very explicit and easy to understand. 
Some examples on how the management policies would work and how they would -replace the current `managementPolicy` and `deletionPolicy`: +replace the current `managementPolicies` and `deletionPolicy`: ```yaml # Default spec: - managementPolicy: FullControl + managementPolicies: FullControl deletionPolicy: Delete # would be replaced with: spec: - managementPolicy: ["Create", "Update", "Delete", "Observe", "LateInitialize"] + managementPolicies: ["Create", "Update", "Delete", "Observe", "LateInitialize"] # or - managementPolicy: ["*"] + managementPolicies: ["*"] # ObserveOnly spec: - managmentPolicy: ObserveOnly + managementPolicies: ObserveOnly # would be replaced with: spec: - managementPolicy: ["Observe"] + managementPolicies: ["Observe"] # OrphanOnDelete spec: - managementPolicy: OrphanOnDelete + managementPolicies: OrphanOnDelete # would be replaced with: spec: - managementPolicy: ["Create", "Update", "Observe", "LateInitialize"] + managementPolicies: ["Create", "Update", "Observe", "LateInitialize"] -# pause can be achieved by setting managementPolicy to empty list instead of +# pause can be achieved by setting managementPolicies to empty list instead of # using the annotation spec: - managementPolicy: [] + managementPolicies: [] # Turn off late initialization spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] ``` In addition to the new management policy, we will also add a new field @@ -144,11 +144,11 @@ For example: policies according to [the ObserveOnly design doc.][ObserveOnly], but still retain some functionality if a non-default value was set. In practice, it meant that if the `deletionPolicy` was set to `Orphan`, and the -`managementPolicy` set to `FullControl`, the external resource would be +`managementPolicies` set to `FullControl`, the external resource would be orphaned. In the new design, we could still follow this approach, by orphaning the -resource even if the `managementPolicy` includes `Delete`, if the +resource even if the `managementPolicies` includes `Delete`, if the `deletionPolicy` is set to `Orphan`, until we entirely remove the deletion policy. @@ -170,11 +170,11 @@ future-proof. ### Migrating existing resources -The `managementPolicy` feature is alpha, so it should be ok to break the -API. The combinations of `managementPolicy` and `deletionPolicy` would look -like this in the new `managementPolicy` field. +The `managementPolicies` feature is alpha, so it should be ok to break the +API. The combinations of `managementPolicies` and `deletionPolicy` would look +like this in the new `managementPolicies` field. -| managementPolicy | deletionPolicy | new managementPolicy | +| managementPolicies | deletionPolicy | new managementPolicies | |------------------|----------------|---------------------------------------------------| | FullControl | Delete | ["*"] | | FullControl | Orphan | ["Create", "Update", "Observe", "LateInitialize"] | @@ -184,8 +184,8 @@ like this in the new `managementPolicy` field. | ObserveOnly | Orphan | ["Observe"] | As this will be a breaking change, if users want to keep the old -`managementPolicy` behaviour, we suggest pausing the reconciliation of the MR, -upgrading Crossplane, and then updating the `managementPolicy` to the desired +`managementPolicies` behaviour, we suggest pausing the reconciliation of the MR, +upgrading Crossplane, and then updating the `managementPolicies` to the desired value before unpausing the reconciliation. 
In reality this is only needed for the `ObserveOnly` and @@ -256,7 +256,7 @@ policy. ```yaml spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] forProvider: maxSize: 5 minSize: 1 @@ -273,7 +273,7 @@ would need to be used alongside omitting `LateInitialize` management policy. ```yaml spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] initProvider: readCapacity: 1 writeCapacity: 1 @@ -290,7 +290,7 @@ the autoscaler would be able to control the `desiredSize` after creation. ```yaml spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] initProvider: scalingConfig: desiredSize: 1 @@ -308,7 +308,7 @@ Just omitting the `LateInitialize` management policy would be enough as the ```yaml spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] forProvider: ... ``` @@ -322,7 +322,7 @@ omitting the `LateInitialize` management policy. Example: ```yaml spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] initProvider: members: - user1 @@ -349,7 +349,7 @@ Ref: [Upjet Initialize] or [AWS community provider tag example]. ### PartialControl management policy + initProvider -Proposed solution is to use the new [managementPolicy] field which came with +Proposed solution is to use the new [managementPolicies] field which came with the [ObserveOnly] feature and add a new management policy that will skip late initialization. The loss the information that the late initialization was providing would be offset by the `status.atProvider` @@ -410,7 +410,7 @@ policy. ```yaml spec: - managementPolicy: PartialControl + managementPolicies: PartialControl forProvider: maxSize: 5 minSize: 1 @@ -427,7 +427,7 @@ would need to be used alongside `PartialControl` management policy. ```yaml spec: - managementPolicy: PartialControl + managementPolicies: PartialControl initProvider: readCapacity: 1 writeCapacity: 1 @@ -444,7 +444,7 @@ autoscaler would be able to control the `desiredSize` after creation. ```yaml spec: - managementPolicy: PartialControl + managementPolicies: PartialControl initProvider: scalingConfig: desiredSize: 1 @@ -468,7 +468,7 @@ Just using the `PartialControl` management policy would be enough as the ```yaml spec: - managementPolicy: PartialControl + managementPolicies: PartialControl forProvider: ... ``` @@ -482,7 +482,7 @@ then ignored on updates would be solved by using `initProvider` alongside Example: ```yaml spec: - managementPolicy: PartialControl + managementPolicies: PartialControl initProvider: members: - user1 @@ -670,7 +670,7 @@ if this issue is not that widespread, we could have an easy fix. 
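To make the enum-set semantics described above concrete, here is a minimal Go
sketch of how a reconciler could consult such a policy set. The types and the
`Has` helper are hypothetical illustrations only, not the crossplane-runtime
API:

```go
package main

import "fmt"

// ManagementAction and ManagementPolicies are hypothetical stand-ins for the
// proposed enum-set API; they are not the real crossplane-runtime types.
type ManagementAction string

const (
	ManagementActionAll            ManagementAction = "*"
	ManagementActionObserve        ManagementAction = "Observe"
	ManagementActionLateInitialize ManagementAction = "LateInitialize"
)

type ManagementPolicies []ManagementAction

// Has reports whether the policy set enables the supplied action. The
// wildcard "*" enables every action.
func (p ManagementPolicies) Has(a ManagementAction) bool {
	for _, action := range p {
		if action == ManagementActionAll || action == a {
			return true
		}
	}
	return false
}

func main() {
	// ["Create", "Update", "Delete", "Observe"] - everything except late
	// initialization, as in the examples above.
	p := ManagementPolicies{"Create", "Update", "Delete", "Observe"}
	fmt.Println(p.Has(ManagementActionObserve))        // true
	fmt.Println(p.Has(ManagementActionLateInitialize)) // false
}
```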
[Update section]: https://github.com/crossplane/crossplane-runtime/blob/1316ae6695eec09cf47abdfd0bc6273aeaab1895/pkg/reconciler/managed/reconciler.go#L1061-L1096 [Late Init section]: https://github.com/crossplane/crossplane-runtime/blob/1316ae6695eec09cf47abdfd0bc6273aeaab1895/pkg/reconciler/managed/reconciler.go#L1033-L1046 [Initialize]: https://github.com/crossplane/crossplane-runtime/blob/1316ae6695eec09cf47abdfd0bc6273aeaab1895/pkg/reconciler/managed/reconciler.go#L742 -[managementPolicy]: https://github.com/crossplane/crossplane-runtime/blob/1316ae6695eec09cf47abdfd0bc6273aeaab1895/apis/common/v1/policies.go#L22 +[managementPolicies]: https://github.com/crossplane/crossplane-runtime/blob/229b63d39990935b8130cf838e6488dcba5c085a/apis/common/v1/policies.go#L21 [ObserveOnly]: https://github.com/crossplane/crossplane/blob/019ddb55916396d654e53a86d9acf1cde49aee31/design/design-doc-observe-only-resources.md [ResourceLateInitialize]: https://github.com/crossplane/crossplane-runtime/blob/00239648258e9731c274fb1f879f8255b948c79a/pkg/reconciler/managed/reconciler.go#L1033 [Late Initialization Update]: https://github.com/crossplane/crossplane-runtime/blob/00239648258e9731c274fb1f879f8255b948c79a/pkg/reconciler/managed/reconciler.go#L1033 From 9615979928f95ff5776057d2eca9546f5c2763ca Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 13 Feb 2024 17:19:30 -0800 Subject: [PATCH 007/370] Add a retroactive one-pager for rate limiting Signed-off-by: Nic Cope --- design/one-pager-rate-limiting.md | 151 ++++++++++++++++++++++++++++++ 1 file changed, 151 insertions(+) create mode 100644 design/one-pager-rate-limiting.md diff --git a/design/one-pager-rate-limiting.md b/design/one-pager-rate-limiting.md new file mode 100644 index 000000000..d317e9534 --- /dev/null +++ b/design/one-pager-rate-limiting.md @@ -0,0 +1,151 @@ +# Reconciler Rate Limiting + +* Owner: Nic Cope (@negz) +* Status: Accepted + +> This one pager retroactively documents a past design decision. See +> [issue #2595] for the original proposal. + +## Background + +Crossplane consists of a series of controllers working together. Ultimately, the +job of those controllers is to reconcile desired state with an external system. +The external system might be Amazon Web Services (AWS), GitHub, or a Kubernetes +cluster. + +Crossplane and Crossplane provider reconciles are rate limited. These rate limits +attempt to ensure: + +* The maximum rate at which Crossplane calls the external system is predictable. +* Crossplane doesn't overload the API server, or the external system. +* Crossplane is as performant as possible. + +It's important that the rate at which Crossplane calls the external system is +predictable because some API calls may cost money. It's also important because +API calls are typically rate limited by the external system. Users may not want +Crossplane to exhaust those rate limits, for example because it must coexist +with other tools that are also subject to the same rate limits. + +Each Crossplane provider exposes a `--max-reconcile-rate` flag that tunes its +rate limits. This flag allows users to make their own trade off between +increased reconcile throughput and increased external API calls. + +## Controller Runtime Rate Limits + +A controller built using `controller-runtime` v0.17 uses the following defaults. + +### API Server Request Rate + +An API server client that rate limits itself to 20 queries per second (qps), +bursting to 30 queries. 
This client is shared by all controllers that are part +of the same controller manager (e.g. same provider). See [`config.go`]. + +### Reconcile Rate + +A rate limiter that rate limits reconciles triggered by _only_: + +* A watched object changing. +* A previous reconcile attempt returning an error. +* A previous reconcile attempt returning `reconcile.Result{Requeue: true}`. + +Importantly, a reconcile triggered by a previous reconcile attempt returning +`reconcile.Result{RequeueAfter: t}` is not subject to rate limiting. This means +reconciles triggered by `--poll-interval` are not subject to rate limiting when +using `controller-runtime` defaults. + +When a reconcile is subject to rate limiting, the earliest time the controller +will process it will be the **maximum** of: + +* The enqueue time plus a duration increasing exponentially from 5ms to 1000s + (~16 minutes). +* The enqueue time plus a duration calculated to limit the controller to 10 + requeues per second on average, using a token bucket algorithm. + +The exponential backoff rate limiting is per object (e.g. per managed resource) +while the token bucket rate limiter is per controller (e.g. per _kind of_ +managed resource). + +See [`controller.go`] and [`default_rate_limiters.go`]. + +### Concurrent Reconciles + +Each controller may process at most one reconcile concurrently. + +## Crossplane Rate Limits + +The controller-runtime defaults are not suitable for Crossplane. Crossplane +wants: + +* To wait more than 5ms before requeuing, but less than 16 minutes. +* To reconcile several managed resources of a particular kind at once. +* To rate limit _classes_ of managed resource (e.g. all AWS resources, or all + EC2 resources). + +Crossplane attempts to achieve this by deriving several rate limits from a +single flag - `--max-reconcile-rate`. The default value for this flag is usually +10 reconciles per second. The flag applies to an entire controller manager (e.g. +Crossplane, or a provider). + +Note that provider maintainers must use the functions defined in [`default.go`] +to ensure these rate limits are applied at the client, global, and controller +levels. + +### API Server Request Rate + +An API server client that rate limits itself to `--max-reconcile-rate * 5` qps, +and `--max-reconcile-rate * 10` burst. With a default `--max-reconcile-rate` of +10 this is 50 qps bursting to 100 queries. This client is shared by all +controllers that are part of the same controller manager (e.g. same provider). +See [`default.go`]. + +### Reconcile Rate + +Crossplane uses two layers of rate limiting. + +A global token bucket rate limiter limits all controllers within a provider to +`--max-reconcile-rate` reconciles per second, bursting to +`--max-reconcile-rate * 10`. With a default `--max-reconcile-rate` of 10 this is +10 reconciles per second, bursting to 100. + +All reconciles are subject to the global rate limiter, even those triggered by a +previous reconcile returning `reconcile.Result{RequeueAfter: t}`. + +An exponential backoff rate limiter limits how frequently a particular object +may be reconciled, backing off from 1s to 60s. A reconcile triggered by a +previous reconcile returning `reconcile.Result{RequeueAfter: t}` is not subject +to this rate limiter. + +Due to limitations of controller-runtime (see [issue #857]) the global rate +limiter is implemented as a middleware `Reconciler`. See [`reconciler.go`]. + +Reconciles may be rate limited by both layers. + +Consider a reconcile that was requeued because it returned an error. 
First it's +subject to the controller's exponential backoff reconciler, which adds the +reconcile to the controller's work queue to be processed from 1 to 60 seconds in +the future. + +When the reconcile is popped from the head of the work queue it's processed by +the middleware `Reconciler`, subject to its token bucket reconciler. If there +are sufficient tokens available in the bucket, the reconcile is passed to the +wrapped (inner) `Reconciler` immediately. If there aren't sufficient tokens +available, the reconcile is returned to the tail of the work queue by returning +`reconcile.Result{RequeueAfter: t}`. + +This results in misleading work queue duration metrics. A reconcile may travel +through the work queue (at most) twice before it's processed. + +### Concurrent Reconciles + +Each controller may process at most `--max-reconcile-rate` reconciles +concurrently. With a default `--max-reconcile-rate` of 10 each controller may +process 10 reconciles concurrently. This means a provider will reconcile at most +10 managed resources of particular kind at once. + +[issue #2595]: https://github.com/crossplane/crossplane/issues/2595 +[`config.go`]: https://github.com/kubernetes-sigs/controller-runtime/blob/v0.17.2/pkg/client/config/config.go#L96 +[`controller.go`]: https://github.com/kubernetes-sigs/controller-runtime/blob/v0.17.2/pkg/internal/controller/controller.go#L316 +[`default_rate_limiters.go`]: https://github.com/kubernetes/client-go/blob/v0.29.2/util/workqueue/default_rate_limiters.go#L39o +[`default.go`]: https://github.com/crossplane/crossplane-runtime/blob/v1.15.0/pkg/ratelimiter/default.go +[issue #857]: https://github.com/kubernetes-sigs/controller-runtime/issues/857 +[`reconciler.go`]: https://github.com/crossplane/crossplane-runtime/blob/v1.15.0/pkg/ratelimiter/reconciler.go#L43 \ No newline at end of file From 9c7296c3293cb1c4db8f0407c4f6df430bb5c700 Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Tue, 13 Feb 2024 18:11:00 -0800 Subject: [PATCH 008/370] Add configuration-template to well known xpkg init templates Signed-off-by: Jared Watts --- cmd/crank/beta/xpkg/init.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/crank/beta/xpkg/init.go b/cmd/crank/beta/xpkg/init.go index 922b0ed09..09e41f381 100644 --- a/cmd/crank/beta/xpkg/init.go +++ b/cmd/crank/beta/xpkg/init.go @@ -47,6 +47,7 @@ func WellKnownTemplates() map[string]string { "provider-template-upjet": "https://github.com/upbound/upjet-provider-template", "function-template-go": "https://github.com/crossplane/function-template-go", "function-template-python": "https://github.com/crossplane/function-template-python", + "configuration-template": "https://github.com/crossplane/configuration-template", } } From 766bcb1b54f69d56f5bfe2a188de70e26753ab63 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 14 Feb 2024 10:57:09 +0200 Subject: [PATCH 009/370] Update ADOPTERS.md with Elastic Serverless information Signed-off-by: Markos Chandras --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index b03687857..e18bf5b10 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -75,3 +75,4 @@ This list is sorted in the order that organizations were added to it. 
| [Imagine Learning](https://www.imaginelearning.com/) | [@blakeromano](https://github.com/blakeromano) [blake.romano@imaginelearning.com](mailto:blake.romano@imaginelearning.com) | Control Plane for Infrastructure in Internal Developer Platform | | [babelforce](https://www.babelforce.com/) | [@nik843](https://github.com/nik843) | Orchestrating relational database servers by creating databases, users and permissions for them within all environments. | | [Nike](https://nike.com/) | [joel.cooklin@nike.com](mailto:joel.cooklin@nike.com) | Crossplane powers the internal developer platform managing thousands of resources from development to production. | +| [Elastic](https://elastic.co) | [@hwoarang](https://github.com/hwoarang) | We use Crossplane to deploy resources across multiple Cloud Providers for the [Elastic Serverless](https://www.elastic.co/elasticsearch/serverless) products. | From 6752269c358b7463b2cdea10fca4a135adbf5003 Mon Sep 17 00:00:00 2001 From: lsviben Date: Wed, 14 Feb 2024 12:05:15 +0100 Subject: [PATCH 010/370] add option to select branch/tag on init Signed-off-by: lsviben --- cmd/crank/beta/xpkg/init.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/crank/beta/xpkg/init.go b/cmd/crank/beta/xpkg/init.go index 09e41f381..fa609118e 100644 --- a/cmd/crank/beta/xpkg/init.go +++ b/cmd/crank/beta/xpkg/init.go @@ -58,6 +58,7 @@ type initCmd struct { Directory string `short:"d" default:"." type:"path" help:"The directory to initialize. It must be empty. It will be created if it doesn't exist."` RunInitScript bool `short:"r" name:"run-init-script" help:"Runs the init.sh script if it exists without prompting"` + RefName string `short:"b" name:"ref-name" help:"The branch or tag to clone from the template repository."` } func (c *initCmd) Help() string { @@ -128,8 +129,9 @@ func (c *initCmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:g fs := osfs.New(c.Directory, osfs.WithBoundOS()) r, err := git.Clone(memory.NewStorage(), fs, &git.CloneOptions{ - URL: repoURL, - Depth: 1, + URL: repoURL, + Depth: 1, + ReferenceName: plumbing.ReferenceName(c.RefName), }) if err != nil { return errors.Wrapf(err, "failed to clone repository from %q", repoURL) From bf46e2619293c20082fa0a096007b1fdcb959b7f Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Thu, 15 Feb 2024 12:59:26 +0000 Subject: [PATCH 011/370] chore: bump README and renovate for 1.15 Signed-off-by: Philippe Scorsolini --- .github/renovate.json5 | 2 +- README.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index a85cfa113..d2318534f 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -12,7 +12,7 @@ "prConcurrentLimit": 5, // The branches renovate should target // PLEASE UPDATE THIS WHEN RELEASING. - "baseBranches": ["master","release-1.12","release-1.13","release-1.14"], + "baseBranches": ["master","release-1.13","release-1.14","release-1.15"], "ignorePaths": [ "design/**", // We test upgrades, so leave it on an older version on purpose. diff --git a/README.md b/README.md index fe64f2ca1..1f1df9fbb 100644 --- a/README.md +++ b/README.md @@ -24,12 +24,12 @@ documentation]. 
| Release | Release Date | EOL | |:-------:|:-------------:|:--------:| -| v1.12 | Apr 25, 2023 | Feb 2024 | | v1.13 | Jul 27, 2023 | May 2024 | | v1.14 | Nov 1, 2023 | Aug 2024 | -| v1.15 | Early Feb '24 | Nov 2024 | +| v1.15 | Feb 15, 2024 | Nov 2024 | | v1.16 | Early May '24 | Feb 2025 | | v1.17 | Early Aug '24 | May 2025 | +| v1.18 | Early Nov '24 | Aug 2025 | You can subscribe to the [community calendar] to track all release dates, and find the most recent releases on the [releases] page. From f1000bad9e23d9f082ef336b518e1594da08dc34 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 15 Feb 2024 11:16:15 -0800 Subject: [PATCH 012/370] Poll more frequently when waiting for composed resources to become ready Also, jitter the poll interval +/- 10%. Signed-off-by: Nic Cope --- .../apiextensions/composite/reconciler.go | 45 ++++++++++++++----- .../composite/reconciler_test.go | 2 +- 2 files changed, 34 insertions(+), 13 deletions(-) diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index a3e9b6f51..08e0299e9 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -20,9 +20,11 @@ package composite import ( "context" "fmt" + "math/rand" "strconv" "time" + corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -228,17 +230,36 @@ func WithRecorder(er event.Recorder) ReconcilerOption { } } -// WithPollInterval specifies how long the Reconciler should wait before queueing -// a new reconciliation after a successful reconcile. The Reconciler requeues -// after a specified duration when it is not actively waiting for an external -// operation, but wishes to check whether resources it does not have a watch on -// (i.e. composed resources) need to be reconciled. -func WithPollInterval(after time.Duration) ReconcilerOption { +// A PollIntervalHook determines how frequently the XR should poll its composed +// resources. +type PollIntervalHook func(ctx context.Context, xr *composite.Unstructured) time.Duration + +// WithPollIntervalHook specifies how to determine how long the Reconciler +// should wait before queueing a new reconciliation after a successful +// reconcile. +func WithPollIntervalHook(h PollIntervalHook) ReconcilerOption { return func(r *Reconciler) { - r.pollInterval = after + r.pollInterval = h } } +// WithPollInterval specifies how long the Reconciler should wait before +// queueing a new reconciliation after a successful reconcile. The Reconciler +// uses the interval jittered +/- 10% when all composed resources are ready. It +// polls twice as frequently (i.e. at half the supplied interval) +/- 10% when +// waiting for composed resources to become ready. +func WithPollInterval(interval time.Duration) ReconcilerOption { + return WithPollIntervalHook(func(_ context.Context, xr *composite.Unstructured) time.Duration { + // The XR is ready when its composed resources are ready. If the + // XR isn't ready yet, poll more frequently. + if xr.GetCondition(xpv1.TypeReady).Status != corev1.ConditionTrue { + interval /= 2 + } + // Jitter the poll interval +/- 10%. + return interval + time.Duration((rand.Float64()-0.5)*2*(float64(interval)*0.1)) //nolint:gosec // No need for secure randomness + }) +} + // WithClient specifies how the Reconciler should interact with the Kubernetes // API. 
func WithClient(c client.Client) ReconcilerOption { @@ -421,7 +442,7 @@ func NewReconciler(mgr manager.Manager, of resource.CompositeKind, opts ...Recon log: logging.NewNopLogger(), record: event.NewNopRecorder(), - pollInterval: defaultPollInterval, + pollInterval: func(_ context.Context, _ *composite.Unstructured) time.Duration { return defaultPollInterval }, } for _, f := range opts { @@ -448,7 +469,7 @@ type Reconciler struct { log logging.Logger record event.Recorder - pollInterval time.Duration + pollInterval PollIntervalHook } // Reconcile a composite resource. @@ -678,17 +699,17 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco for i, cd := range unready { names[i] = string(cd.ResourceName) } - // sort for stable condition messages. With functions, we don't have a + // Sort for stable condition messages. With functions, we don't have a // stable order otherwise. xr.SetConditions(xpv1.Creating().WithMessage(fmt.Sprintf("Unready resources: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, names)))) - return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) + return reconcile.Result{RequeueAfter: r.pollInterval(ctx, xr)}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) } // We requeue after our poll interval because we can't watch composed // resources - we can't know what type of resources we might compose // when this controller is started. xr.SetConditions(xpv1.Available()) - return reconcile.Result{RequeueAfter: r.pollInterval}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) + return reconcile.Result{RequeueAfter: r.pollInterval(ctx, xr)}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) } // EnqueueForCompositionRevisionFunc returns a function that enqueues (the diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index 3e7dc87f7..177c5ef6e 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ b/internal/controller/apiextensions/composite/reconciler_test.go @@ -543,7 +543,7 @@ func TestReconcile(t *testing.T) { }, }, want: want{ - r: reconcile.Result{Requeue: true}, + r: reconcile.Result{RequeueAfter: defaultPollInterval}, }, }, "ComposedResourcesReady": { From 3f85218639a74f2b521cae4d03308e6d8da1ceaf Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sat, 17 Feb 2024 05:36:21 -0800 Subject: [PATCH 013/370] Revert to requeuing immediately for unready composed resources Signed-off-by: Nic Cope --- .../apiextensions/composite/reconciler.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index 08e0299e9..163b1dfd0 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -24,7 +24,6 @@ import ( "strconv" "time" - corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -249,12 +248,7 @@ func WithPollIntervalHook(h PollIntervalHook) ReconcilerOption { // polls twice as frequently (i.e. at half the supplied interval) +/- 10% when // waiting for composed resources to become ready. 
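// As an illustrative calculation (added for clarity, not part of the original
// change): with a 60s interval the jittered value is drawn uniformly from
// roughly 54s to 66s, i.e. 60s +/- 10%.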
func WithPollInterval(interval time.Duration) ReconcilerOption { - return WithPollIntervalHook(func(_ context.Context, xr *composite.Unstructured) time.Duration { - // The XR is ready when its composed resources are ready. If the - // XR isn't ready yet, poll more frequently. - if xr.GetCondition(xpv1.TypeReady).Status != corev1.ConditionTrue { - interval /= 2 - } + return WithPollIntervalHook(func(_ context.Context, _ *composite.Unstructured) time.Duration { // Jitter the poll interval +/- 10%. return interval + time.Duration((rand.Float64()-0.5)*2*(float64(interval)*0.1)) //nolint:gosec // No need for secure randomness }) @@ -702,7 +696,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // Sort for stable condition messages. With functions, we don't have a // stable order otherwise. xr.SetConditions(xpv1.Creating().WithMessage(fmt.Sprintf("Unready resources: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, names)))) - return reconcile.Result{RequeueAfter: r.pollInterval(ctx, xr)}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) + + // TODO(negz): This will exponentially backoff from 1s to 1m. We + // probably don't want to back off so much when waiting for composed + // resources to become ready. + return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) } // We requeue after our poll interval because we can't watch composed From be615b91ad4e49301dbbe10bcf6c47c96fe229a9 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sat, 17 Feb 2024 06:35:36 -0800 Subject: [PATCH 014/370] Cap XR requeue backoff to 30 seconds This will allow us to detect ready XRs slightly faster. Previously backoff was from 1 to 60 seconds. This means an XR that wasn't ready in the first 63 seconds would be polled every 60 seconds until it became ready. Now backoff is from 1 to 30 seconds. This means an XR that isn't ready in the first 31 seconds will be polled every 30 seconds until it becomes ready. Note that this change affects XRs that are persistently returning errors, not just unready XRs. The XR reconciler only returns errors when it can't get the XR or can't update the status of the XR. Signed-off-by: Nic Cope --- internal/controller/apiextensions/composite/reconciler.go | 6 +++--- .../controller/apiextensions/composite/reconciler_test.go | 2 +- .../controller/apiextensions/definition/reconciler.go | 8 ++++++++ 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index 163b1dfd0..d33f7166e 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -697,9 +697,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // stable order otherwise. xr.SetConditions(xpv1.Creating().WithMessage(fmt.Sprintf("Unready resources: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, names)))) - // TODO(negz): This will exponentially backoff from 1s to 1m. We - // probably don't want to back off so much when waiting for composed - // resources to become ready. + // This requeue is subject to rate limiting. Requeues will exponentially + // backoff from 1 to 30 seconds. See the 'definition' (XRD) reconciler + // that sets up the ratelimiter. 
return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) } diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index 177c5ef6e..3e7dc87f7 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ b/internal/controller/apiextensions/composite/reconciler_test.go @@ -543,7 +543,7 @@ func TestReconcile(t *testing.T) { }, }, want: want{ - r: reconcile.Result{RequeueAfter: defaultPollInterval}, + r: reconcile.Result{Requeue: true}, }, }, "ComposedResourcesReady": { diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 4ae28275b..c93b2bc7d 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -463,6 +463,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } cr := composite.NewReconciler(r.mgr, ck, ro...) ko := r.options.ForControllerRuntime() + + // Most controllers use this type of rate limiter to backoff requeues from 1 + // to 60 seconds. Despite the name, it doesn't only rate limit requeues due + // to errors. It also rate limits requeues due to a reconcile returning + // {Requeue: true}. The XR reconciler returns {Requeue: true} while waiting + // for composed resources to become ready, and we don't want to back off as + // far as 60 seconds. Instead we cap the XR reconciler at 30 seconds. + ko.RateLimiter = workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, 30*time.Second) ko.Reconciler = ratelimiter.NewReconciler(composite.ControllerName(d.GetName()), errors.WithSilentRequeueOnConflict(cr), r.options.GlobalRateLimiter) xrGVK := d.GetCompositeGroupVersionKind() From 33b6bfd93d102827ec45c38fd545df20033a33fb Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 17:48:13 -0800 Subject: [PATCH 015/370] Enable all golangci-linters It seems like new linters are being added with each new version of golangci-lint. I like the idea of opting out of new linters we don't like, rather than opting into new linters. I think there will be several that we'll want to opt out of, which I'll do in a following commit. Signed-off-by: Nic Cope --- .golangci.yml | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 2cb328129..15e7bbb6a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -109,20 +109,7 @@ linters-settings: linters: - enable: - - megacheck - - govet - - gocyclo - - gocritic - - goconst - - gci - - gofmt # We enable this as well as goimports for its simplify mode. - - prealloc - - revive - - unconvert - - misspell - - nakedret - - nolintlint + enable-all: true disable: # These linters are all deprecated as of golangci-lint v1.49.0. 
We disable @@ -133,9 +120,6 @@ linters: - structcheck - interfacer - presets: - - bugs - - unused fast: false From e46a5bfd7341f9742ec44340e201351845346c74 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 17:51:01 -0800 Subject: [PATCH 016/370] Files automatically fixed by enabling all linters Signed-off-by: Nic Cope --- apis/apiextensions/apiextensions.go | 4 +- apis/apiextensions/v1/composition_common.go | 8 +-- .../v1/composition_environment.go | 3 - .../v1/composition_environment_test.go | 3 - .../v1/composition_transforms.go | 3 - .../v1/composition_validation.go | 2 +- .../v1/composition_validation_test.go | 70 +++++++++++++++---- apis/apiextensions/v1/register.go | 6 +- apis/apiextensions/v1alpha1/register.go | 6 +- apis/apiextensions/v1beta1/register.go | 6 +- .../zz_generated.composition_common.go | 8 +-- .../zz_generated.composition_environment.go | 3 - .../zz_generated.composition_transforms.go | 3 - apis/apis.go | 4 +- apis/pkg/meta/v1/interfaces.go | 6 +- apis/pkg/meta/v1/register.go | 6 +- apis/pkg/meta/v1alpha1/register.go | 6 +- apis/pkg/meta/v1beta1/register.go | 6 +- apis/pkg/pkg.go | 4 +- apis/pkg/v1/interfaces.go | 8 +-- apis/pkg/v1/interfaces_test.go | 18 +++-- apis/pkg/v1/register.go | 6 +- apis/pkg/v1alpha1/register.go | 6 +- apis/pkg/v1beta1/function_interfaces.go | 4 +- apis/pkg/v1beta1/function_interfaces_test.go | 8 ++- apis/pkg/v1beta1/lock.go | 6 +- apis/pkg/v1beta1/register.go | 6 +- apis/secrets/secrets.go | 4 +- apis/secrets/v1alpha1/register.go | 6 +- apis/secrets/v1alpha1/storeconfig_types.go | 4 +- ...ns.crossplane.io_compositionrevisions.yaml | 4 +- ...extensions.crossplane.io_compositions.yaml | 2 +- .../beta/convert/deploymentruntime/cmd.go | 4 +- .../convert/deploymentruntime/converter.go | 11 ++- .../deploymentruntime/converter_test.go | 52 +++++++------- cmd/crank/beta/convert/io/io.go | 2 +- .../beta/convert/pipelinecomposition/cmd.go | 6 +- .../convert/pipelinecomposition/converter.go | 10 +-- .../pipelinecomposition/converter_test.go | 17 ++--- cmd/crank/beta/render/cmd.go | 24 +++---- cmd/crank/beta/render/load_test.go | 15 ++-- cmd/crank/beta/render/render_test.go | 4 -- cmd/crank/beta/top/top.go | 5 +- cmd/crank/beta/top/top_test.go | 4 +- .../beta/trace/internal/printer/default.go | 3 +- .../trace/internal/printer/default_test.go | 1 - cmd/crank/beta/trace/internal/printer/dot.go | 5 +- .../beta/trace/internal/printer/dot_test.go | 4 +- cmd/crank/beta/trace/internal/printer/json.go | 3 +- .../beta/trace/internal/printer/json_test.go | 1 - .../beta/trace/internal/printer/printer.go | 2 +- .../beta/trace/internal/resource/client.go | 1 - .../internal/resource/xpkg/client_test.go | 2 +- .../trace/internal/resource/xpkg/xpkg_test.go | 2 +- cmd/crank/beta/trace/trace.go | 14 ++-- cmd/crank/beta/validate/cache.go | 14 ++-- cmd/crank/beta/validate/cmd.go | 2 +- cmd/crank/beta/validate/image.go | 6 +- cmd/crank/beta/validate/loader.go | 17 +++-- cmd/crank/beta/validate/manager.go | 8 +-- cmd/crank/beta/validate/validate.go | 2 +- cmd/crank/beta/xpkg/init.go | 8 +-- cmd/crank/beta/xpkg/init_test.go | 2 +- cmd/crank/main.go | 10 +-- cmd/crank/xpkg/build.go | 13 ++-- cmd/crank/xpkg/install.go | 15 ++-- cmd/crank/xpkg/login.go | 8 +-- cmd/crank/xpkg/push.go | 2 +- cmd/crank/xpkg/update.go | 4 +- cmd/crossplane/core/core.go | 44 ++++++------ cmd/crossplane/core/init.go | 24 +++---- cmd/crossplane/main.go | 12 ++-- cmd/crossplane/rbac/rbac.go | 20 +++--- .../apiextensions/claim/connection.go | 2 +- .../apiextensions/claim/connection_test.go | 
4 +- .../controller/apiextensions/claim/object.go | 6 +- .../apiextensions/claim/reconciler.go | 4 +- .../apiextensions/claim/syncer_ssa.go | 2 +- .../apiextensions/composite/api_test.go | 3 +- .../composite/composition_functions_test.go | 31 ++++---- .../composite/composition_patches.go | 2 +- .../composite/composition_patches_test.go | 36 ++++++---- .../apiextensions/composite/composition_pt.go | 6 +- .../composite/composition_pt_test.go | 2 - .../composite/composition_render.go | 2 +- .../composite/composition_transforms.go | 1 - .../composite/composition_transforms_test.go | 5 +- .../apiextensions/composite/connection.go | 2 +- .../composite/connection_test.go | 1 - .../composite/environment_fetcher.go | 2 +- .../composite/environment_selector.go | 2 +- .../composite/environment_selector_test.go | 4 -- .../apiextensions/composite/fuzz_test.go | 8 +-- .../apiextensions/composite/ready.go | 6 +- .../apiextensions/composite/reconciler.go | 2 +- .../apiextensions/composition/reconciler.go | 2 +- .../apiextensions/offered/reconciler_test.go | 3 +- .../apiextensions/offered/watch_test.go | 4 +- .../controller/pkg/manager/reconciler_test.go | 1 + .../controller/pkg/revision/reconciler.go | 3 +- .../pkg/revision/runtime_override_options.go | 9 +-- .../revision/runtime_override_options_test.go | 3 +- .../pkg/revision/runtime_provider_test.go | 4 +- .../controller/pkg/revision/watch_test.go | 4 +- .../controller/rbac/definition/reconciler.go | 1 - .../controller/rbac/namespace/reconciler.go | 1 - .../controller/rbac/namespace/roles_test.go | 1 - internal/controller/rbac/namespace/watch.go | 1 - .../controller/rbac/namespace/watch_test.go | 4 +- .../rbac/provider/binding/reconciler.go | 1 - .../rbac/provider/roles/reconciler.go | 4 +- .../rbac/provider/roles/watch_test.go | 4 +- internal/dag/dag_test.go | 6 +- internal/features/features.go | 2 +- internal/initializer/cert_generator.go | 18 +++-- internal/initializer/crds_migrator.go | 2 +- internal/initializer/tls.go | 10 +-- internal/initializer/tls_test.go | 2 - internal/transport/transport_test.go | 6 +- internal/usage/handler.go | 2 +- internal/xcrd/crd_test.go | 19 +++-- internal/xcrd/schemas.go | 4 +- internal/xfn/function_runner.go | 2 +- internal/xpkg/fetch.go | 2 +- internal/xpkg/fuzz_test.go | 3 +- internal/xpkg/upbound/config/config.go | 2 +- internal/xpkg/upbound/config/source.go | 6 +- internal/xpkg/upbound/config/source_test.go | 8 ++- internal/xpkg/upbound/context.go | 21 +++--- internal/xpkg/upbound/context_test.go | 2 +- .../apiextensions/v1/composition/patches.go | 2 - .../v1/composition/patches_test.go | 47 +++++++++++-- .../v1/composition/readinessChecks_test.go | 1 - .../apiextensions/v1/composition/schema.go | 3 +- .../v1/composition/schema_test.go | 60 ++++++++++------ .../v1/composition/validator_test.go | 12 ++-- test/e2e/apiextensions_test.go | 10 ++- test/e2e/config/environment.go | 4 +- test/e2e/environmentconfig_test.go | 1 + test/e2e/funcs/env.go | 3 +- test/e2e/funcs/feature.go | 11 ++- test/e2e/main_test.go | 16 ++--- test/e2e/utils/cert.go | 2 +- 143 files changed, 590 insertions(+), 524 deletions(-) diff --git a/apis/apiextensions/apiextensions.go b/apis/apiextensions/apiextensions.go index c4ee174cf..9d71f0494 100644 --- a/apis/apiextensions/apiextensions.go +++ b/apis/apiextensions/apiextensions.go @@ -34,10 +34,10 @@ func init() { ) } -// AddToSchemes may be used to add all resources defined in the project to a Scheme +// AddToSchemes may be used to add all resources defined in the project to a Scheme. 
var AddToSchemes runtime.SchemeBuilder -// AddToScheme adds all Resources to the Scheme +// AddToScheme adds all Resources to the Scheme. func AddToScheme(s *runtime.Scheme) error { return AddToSchemes.AddToScheme(s) } diff --git a/apis/apiextensions/v1/composition_common.go b/apis/apiextensions/v1/composition_common.go index 4800d6a1d..624b9d406 100644 --- a/apis/apiextensions/v1/composition_common.go +++ b/apis/apiextensions/v1/composition_common.go @@ -35,7 +35,7 @@ import ( into composition_revision_types.go. */ -// A CompositionMode determines what mode of Composition is used +// A CompositionMode determines what mode of Composition is used. type CompositionMode string const ( @@ -63,7 +63,7 @@ type TypeReference struct { Kind string `json:"kind"` } -// TypeReferenceTo returns a reference to the supplied GroupVersionKind +// TypeReferenceTo returns a reference to the supplied GroupVersionKind. func TypeReferenceTo(gvk schema.GroupVersionKind) TypeReference { return TypeReference{APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind} } @@ -146,7 +146,7 @@ func (t *ReadinessCheckType) IsValid() bool { } // ReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. type ReadinessCheck struct { // TODO(negz): Optional fields should be nil in the next version of this // API. How would we know if we actually wanted to match the empty string, @@ -174,7 +174,7 @@ type ReadinessCheck struct { } // MatchConditionReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. type MatchConditionReadinessCheck struct { // Type indicates the type of condition you'd like to use. // +kubebuilder:default="Ready" diff --git a/apis/apiextensions/v1/composition_environment.go b/apis/apiextensions/v1/composition_environment.go index a7d6fb210..8635795b3 100644 --- a/apis/apiextensions/v1/composition_environment.go +++ b/apis/apiextensions/v1/composition_environment.go @@ -80,7 +80,6 @@ func (e *EnvironmentConfiguration) Validate() field.ErrorList { // ShouldResolve specifies whether EnvironmentConfiguration should be resolved or not. func (e *EnvironmentConfiguration) ShouldResolve(currentRefs []corev1.ObjectReference) bool { - if e == nil || len(e.EnvironmentConfigs) == 0 { return false } @@ -185,7 +184,6 @@ const ( // An EnvironmentSourceSelector selects an EnvironmentConfig via labels. type EnvironmentSourceSelector struct { - // Mode specifies retrieval strategy: "Single" or "Multiple". // +kubebuilder:validation:Enum=Single;Multiple // +kubebuilder:default=Single @@ -207,7 +205,6 @@ type EnvironmentSourceSelector struct { // Validate logically validates the EnvironmentSourceSelector. 
func (e *EnvironmentSourceSelector) Validate() *field.Error { - if e.Mode == EnvironmentSourceSelectorSingleMode && e.MaxMatch != nil { return field.Forbidden(field.NewPath("maxMatch"), "maxMatch is not supported in Single mode") } diff --git a/apis/apiextensions/v1/composition_environment_test.go b/apis/apiextensions/v1/composition_environment_test.go index 12d4a8aa0..d3e6e4904 100644 --- a/apis/apiextensions/v1/composition_environment_test.go +++ b/apis/apiextensions/v1/composition_environment_test.go @@ -151,7 +151,6 @@ func TestEnvironmentShouldResolve(t *testing.T) { } for name, tc := range cases { - t.Run(name, func(t *testing.T) { got := tc.args.ec.ShouldResolve(tc.args.refs) if diff := cmp.Diff(tc.want, got); diff != "" { @@ -162,7 +161,6 @@ func TestEnvironmentShouldResolve(t *testing.T) { } func TestEnvironmentSourceSelectorValidate(t *testing.T) { - type args struct { e *EnvironmentSourceSelector } @@ -201,7 +199,6 @@ func TestEnvironmentSourceSelectorValidate(t *testing.T) { } for name, tc := range cases { - t.Run(name, func(t *testing.T) { got := tc.args.e.Validate() if diff := cmp.Diff(tc.want, got, cmpopts.IgnoreFields(field.Error{}, "Detail", "BadValue")); diff != "" { diff --git a/apis/apiextensions/v1/composition_transforms.go b/apis/apiextensions/v1/composition_transforms.go index 5a15600ad..258e380ed 100644 --- a/apis/apiextensions/v1/composition_transforms.go +++ b/apis/apiextensions/v1/composition_transforms.go @@ -45,7 +45,6 @@ const ( // Transform is a unit of process whose input is transformed into an output with // the supplied configuration. type Transform struct { - // Type of the transform to be run. // +kubebuilder:validation:Enum=map;match;math;string;convert Type TransformType `json:"type"` @@ -360,7 +359,6 @@ const ( // A StringTransform returns a string given the supplied input. type StringTransform struct { - // Type of the string transform to be run. // +optional // +kubebuilder:validation:Enum=Format;Convert;TrimPrefix;TrimSuffix;Regexp;Join @@ -430,7 +428,6 @@ func (s *StringTransform) Validate() *field.Error { return field.Invalid(field.NewPath("type"), s.Type, "unknown string transform type") } return nil - } // A StringTransformRegexp extracts a match from the input using a regular diff --git a/apis/apiextensions/v1/composition_validation.go b/apis/apiextensions/v1/composition_validation.go index 75b8586bb..8dd7285fa 100644 --- a/apis/apiextensions/v1/composition_validation.go +++ b/apis/apiextensions/v1/composition_validation.go @@ -76,7 +76,7 @@ func (c *Composition) validatePipeline() (errs field.ErrorList) { // validatePatchSets checks that: // - patchSets are composed of valid patches // - there are no nested patchSets -// - only existing patchSets are used by resources +// - only existing patchSets are used by resources. 
func (c *Composition) validatePatchSets() (errs field.ErrorList) { definedPatchSets := make(map[string]bool, len(c.Spec.PatchSets)) for i, s := range c.Spec.PatchSets { diff --git a/apis/apiextensions/v1/composition_validation_test.go b/apis/apiextensions/v1/composition_validation_test.go index c808da010..8c8202f83 100644 --- a/apis/apiextensions/v1/composition_validation_test.go +++ b/apis/apiextensions/v1/composition_validation_test.go @@ -713,7 +713,9 @@ func TestCompositionValidateEnvironment(t *testing.T) { comp: &Composition{ Spec: CompositionSpec{ Environment: &EnvironmentConfiguration{}, - }}}, + }, + }, + }, }, "ValidNilEnvironment": { reason: "Should accept a nil environment", @@ -721,7 +723,9 @@ func TestCompositionValidateEnvironment(t *testing.T) { comp: &Composition{ Spec: CompositionSpec{ Environment: nil, - }}}, + }, + }, + }, }, "ValidEnvironment": { reason: "Should accept a valid environment", @@ -751,7 +755,15 @@ func TestCompositionValidateEnvironment(t *testing.T) { Type: EnvironmentSourceSelectorLabelMatcherTypeFromCompositeFieldPath, Key: "foo", ValueFromFieldPath: ptr.To("spec.foo"), - }}}}}}}}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, "InvalidPatchEnvironment": { reason: "Should reject an environment declaring an invalid patch", @@ -770,7 +782,7 @@ func TestCompositionValidateEnvironment(t *testing.T) { Patches: []EnvironmentPatch{ { Type: PatchTypeFromCompositeFieldPath, - //FromFieldPath: ptr.To("spec.foo"), // missing + // FromFieldPath: ptr.To("spec.foo"), // missing ToFieldPath: ptr.To("metadata.annotations[\"foo\"]"), }, }, @@ -789,7 +801,15 @@ func TestCompositionValidateEnvironment(t *testing.T) { Type: EnvironmentSourceSelectorLabelMatcherTypeFromCompositeFieldPath, Key: "foo", ValueFromFieldPath: ptr.To("spec.foo"), - }}}}}}}}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, "InvalidEnvironmentSourceReferenceNoName": { reason: "Should reject a invalid environment, due to a missing name", @@ -809,7 +829,7 @@ func TestCompositionValidateEnvironment(t *testing.T) { { Type: EnvironmentSourceTypeReference, Ref: &EnvironmentSourceReference{ - //Name: "foo", // missing + // Name: "foo", // missing }, }, { @@ -820,7 +840,15 @@ func TestCompositionValidateEnvironment(t *testing.T) { Type: EnvironmentSourceSelectorLabelMatcherTypeFromCompositeFieldPath, Key: "foo", ValueFromFieldPath: ptr.To("spec.foo"), - }}}}}}}}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, "InvalidEnvironmentSourceSelectorNoKey": { reason: "Should reject a invalid environment due to a missing key in a selector", @@ -849,9 +877,17 @@ func TestCompositionValidateEnvironment(t *testing.T) { MatchLabels: []EnvironmentSourceSelectorLabelMatcher{ { Type: EnvironmentSourceSelectorLabelMatcherTypeFromCompositeFieldPath, - //Key: "foo", // missing + // Key: "foo", // missing ValueFromFieldPath: ptr.To("spec.foo"), - }}}}}}}}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, "InvalidMultipleErrors": { reason: "Should reject a invalid environment due to multiple errors, reporting all of them", @@ -862,7 +898,7 @@ func TestCompositionValidateEnvironment(t *testing.T) { Patches: []EnvironmentPatch{ { Type: PatchTypeFromCompositeFieldPath, - //FromFieldPath: ptr.To("spec.foo"), // missing + // FromFieldPath: ptr.To("spec.foo"), // missing ToFieldPath: ptr.To("metadata.annotations[\"foo\"]"), }, }, @@ -870,7 +906,7 @@ func TestCompositionValidateEnvironment(t *testing.T) { { Type: EnvironmentSourceTypeReference, Ref: &EnvironmentSourceReference{ - //Name: "foo", // missing + // Name: "foo", // 
missing }, }, { @@ -879,9 +915,17 @@ func TestCompositionValidateEnvironment(t *testing.T) { MatchLabels: []EnvironmentSourceSelectorLabelMatcher{ { Type: EnvironmentSourceSelectorLabelMatcherTypeFromCompositeFieldPath, - //Key: "foo", // missing + // Key: "foo", // missing ValueFromFieldPath: ptr.To("spec.foo"), - }}}}}}}}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, want: want{ output: field.ErrorList{ { diff --git a/apis/apiextensions/v1/register.go b/apis/apiextensions/v1/register.go index 1f8100cca..8caa69904 100644 --- a/apis/apiextensions/v1/register.go +++ b/apis/apiextensions/v1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/apiextensions/v1alpha1/register.go b/apis/apiextensions/v1alpha1/register.go index b8a13c126..cdb549a3a 100644 --- a/apis/apiextensions/v1alpha1/register.go +++ b/apis/apiextensions/v1alpha1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/apiextensions/v1beta1/register.go b/apis/apiextensions/v1beta1/register.go index 22eaf4bbf..212dfa5b4 100644 --- a/apis/apiextensions/v1beta1/register.go +++ b/apis/apiextensions/v1beta1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_common.go b/apis/apiextensions/v1beta1/zz_generated.composition_common.go index d2b488430..20aada02d 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_common.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_common.go @@ -37,7 +37,7 @@ import ( into composition_revision_types.go. */ -// A CompositionMode determines what mode of Composition is used +// A CompositionMode determines what mode of Composition is used. 
type CompositionMode string const ( @@ -65,7 +65,7 @@ type TypeReference struct { Kind string `json:"kind"` } -// TypeReferenceTo returns a reference to the supplied GroupVersionKind +// TypeReferenceTo returns a reference to the supplied GroupVersionKind. func TypeReferenceTo(gvk schema.GroupVersionKind) TypeReference { return TypeReference{APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind} } @@ -148,7 +148,7 @@ func (t *ReadinessCheckType) IsValid() bool { } // ReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. type ReadinessCheck struct { // TODO(negz): Optional fields should be nil in the next version of this // API. How would we know if we actually wanted to match the empty string, @@ -176,7 +176,7 @@ type ReadinessCheck struct { } // MatchConditionReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. type MatchConditionReadinessCheck struct { // Type indicates the type of condition you'd like to use. // +kubebuilder:default="Ready" diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_environment.go b/apis/apiextensions/v1beta1/zz_generated.composition_environment.go index a521cbc1c..a7e0d451f 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_environment.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_environment.go @@ -82,7 +82,6 @@ func (e *EnvironmentConfiguration) Validate() field.ErrorList { // ShouldResolve specifies whether EnvironmentConfiguration should be resolved or not. func (e *EnvironmentConfiguration) ShouldResolve(currentRefs []corev1.ObjectReference) bool { - if e == nil || len(e.EnvironmentConfigs) == 0 { return false } @@ -187,7 +186,6 @@ const ( // An EnvironmentSourceSelector selects an EnvironmentConfig via labels. type EnvironmentSourceSelector struct { - // Mode specifies retrieval strategy: "Single" or "Multiple". // +kubebuilder:validation:Enum=Single;Multiple // +kubebuilder:default=Single @@ -209,7 +207,6 @@ type EnvironmentSourceSelector struct { // Validate logically validates the EnvironmentSourceSelector. func (e *EnvironmentSourceSelector) Validate() *field.Error { - if e.Mode == EnvironmentSourceSelectorSingleMode && e.MaxMatch != nil { return field.Forbidden(field.NewPath("maxMatch"), "maxMatch is not supported in Single mode") } diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go b/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go index c7f611757..b73c57c46 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go @@ -47,7 +47,6 @@ const ( // Transform is a unit of process whose input is transformed into an output with // the supplied configuration. type Transform struct { - // Type of the transform to be run. // +kubebuilder:validation:Enum=map;match;math;string;convert Type TransformType `json:"type"` @@ -362,7 +361,6 @@ const ( // A StringTransform returns a string given the supplied input. type StringTransform struct { - // Type of the string transform to be run. 
// +optional // +kubebuilder:validation:Enum=Format;Convert;TrimPrefix;TrimSuffix;Regexp;Join @@ -432,7 +430,6 @@ func (s *StringTransform) Validate() *field.Error { return field.Invalid(field.NewPath("type"), s.Type, "unknown string transform type") } return nil - } // A StringTransformRegexp extracts a match from the input using a regular diff --git a/apis/apis.go b/apis/apis.go index a7f723f0b..00eae4d5a 100644 --- a/apis/apis.go +++ b/apis/apis.go @@ -34,10 +34,10 @@ func init() { ) } -// AddToSchemes may be used to add all resources defined in the project to a Scheme +// AddToSchemes may be used to add all resources defined in the project to a Scheme. var AddToSchemes runtime.SchemeBuilder -// AddToScheme adds all Resources to the Scheme +// AddToScheme adds all Resources to the Scheme. func AddToScheme(s *runtime.Scheme) error { return AddToSchemes.AddToScheme(s) } diff --git a/apis/pkg/meta/v1/interfaces.go b/apis/pkg/meta/v1/interfaces.go index 6b3c3b16e..c6ee67aa4 100644 --- a/apis/pkg/meta/v1/interfaces.go +++ b/apis/pkg/meta/v1/interfaces.go @@ -16,8 +16,10 @@ limitations under the License. package v1 -var _ Pkg = &Configuration{} -var _ Pkg = &Provider{} +var ( + _ Pkg = &Configuration{} + _ Pkg = &Provider{} +) // Pkg is a description of a Crossplane package. // +k8s:deepcopy-gen=false diff --git a/apis/pkg/meta/v1/register.go b/apis/pkg/meta/v1/register.go index 0ede3e8aa..5444051c9 100644 --- a/apis/pkg/meta/v1/register.go +++ b/apis/pkg/meta/v1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/pkg/meta/v1alpha1/register.go b/apis/pkg/meta/v1alpha1/register.go index 41f37e2c0..5829a58d1 100644 --- a/apis/pkg/meta/v1alpha1/register.go +++ b/apis/pkg/meta/v1alpha1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/pkg/meta/v1beta1/register.go b/apis/pkg/meta/v1beta1/register.go index d5675481d..909d27feb 100644 --- a/apis/pkg/meta/v1beta1/register.go +++ b/apis/pkg/meta/v1beta1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. 
SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/pkg/pkg.go b/apis/pkg/pkg.go index 81cac3e51..14b868bd2 100644 --- a/apis/pkg/pkg.go +++ b/apis/pkg/pkg.go @@ -34,10 +34,10 @@ func init() { ) } -// AddToSchemes may be used to add all resources defined in the project to a Scheme +// AddToSchemes may be used to add all resources defined in the project to a Scheme. var AddToSchemes runtime.SchemeBuilder -// AddToScheme adds all Resources to the Scheme +// AddToScheme adds all Resources to the Scheme. func AddToScheme(s *runtime.Scheme) error { return AddToSchemes.AddToScheme(s) } diff --git a/apis/pkg/v1/interfaces.go b/apis/pkg/v1/interfaces.go index 9a1c55638..d3a1b2f8e 100644 --- a/apis/pkg/v1/interfaces.go +++ b/apis/pkg/v1/interfaces.go @@ -126,7 +126,7 @@ func (p *Provider) SetConditions(c ...xpv1.Condition) { p.Status.SetConditions(c...) } -// CleanConditions removes all conditions +// CleanConditions removes all conditions. func (p *Provider) CleanConditions() { p.Status.Conditions = []xpv1.Condition{} } @@ -271,7 +271,7 @@ func (p *Configuration) SetConditions(c ...xpv1.Condition) { p.Status.SetConditions(c...) } -// CleanConditions removes all conditions +// CleanConditions removes all conditions. func (p *Configuration) CleanConditions() { p.Status.Conditions = []xpv1.Condition{} } @@ -444,7 +444,7 @@ func (p *ProviderRevision) SetConditions(c ...xpv1.Condition) { p.Status.SetConditions(c...) } -// CleanConditions removes all conditions +// CleanConditions removes all conditions. func (p *ProviderRevision) CleanConditions() { p.Status.Conditions = []xpv1.Condition{} } @@ -601,7 +601,7 @@ func (p *ConfigurationRevision) SetConditions(c ...xpv1.Condition) { p.Status.SetConditions(c...) } -// CleanConditions removes all conditions +// CleanConditions removes all conditions. func (p *ConfigurationRevision) CleanConditions() { p.Status.Conditions = []xpv1.Condition{} } diff --git a/apis/pkg/v1/interfaces_test.go b/apis/pkg/v1/interfaces_test.go index f6c2e3ae7..cdb00f4c7 100644 --- a/apis/pkg/v1/interfaces_test.go +++ b/apis/pkg/v1/interfaces_test.go @@ -16,11 +16,17 @@ limitations under the License. package v1 -var _ Package = &Provider{} -var _ Package = &Configuration{} +var ( + _ Package = &Provider{} + _ Package = &Configuration{} +) -var _ PackageRevision = &ProviderRevision{} -var _ PackageRevision = &ConfigurationRevision{} +var ( + _ PackageRevision = &ProviderRevision{} + _ PackageRevision = &ConfigurationRevision{} +) -var _ PackageRevisionList = &ProviderRevisionList{} -var _ PackageRevisionList = &ConfigurationRevisionList{} +var ( + _ PackageRevisionList = &ProviderRevisionList{} + _ PackageRevisionList = &ConfigurationRevisionList{} +) diff --git a/apis/pkg/v1/register.go b/apis/pkg/v1/register.go index 56800528f..34b27cbfb 100644 --- a/apis/pkg/v1/register.go +++ b/apis/pkg/v1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. 
SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to the scheme + // AddToScheme adds all registered types to the scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/pkg/v1alpha1/register.go b/apis/pkg/v1alpha1/register.go index 63509f215..f18ca3e75 100644 --- a/apis/pkg/v1alpha1/register.go +++ b/apis/pkg/v1alpha1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to the scheme + // AddToScheme adds all registered types to the scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/pkg/v1beta1/function_interfaces.go b/apis/pkg/v1beta1/function_interfaces.go index 444c49d78..4166f4f16 100644 --- a/apis/pkg/v1beta1/function_interfaces.go +++ b/apis/pkg/v1beta1/function_interfaces.go @@ -34,7 +34,7 @@ func (f *Function) SetConditions(c ...xpv1.Condition) { f.Status.SetConditions(c...) } -// CleanConditions removes all conditions +// CleanConditions removes all conditions. func (f *Function) CleanConditions() { f.Status.Conditions = []xpv1.Condition{} } @@ -177,7 +177,7 @@ func (r *FunctionRevision) SetConditions(c ...xpv1.Condition) { r.Status.SetConditions(c...) } -// CleanConditions removes all conditions +// CleanConditions removes all conditions. func (r *FunctionRevision) CleanConditions() { r.Status.Conditions = []xpv1.Condition{} } diff --git a/apis/pkg/v1beta1/function_interfaces_test.go b/apis/pkg/v1beta1/function_interfaces_test.go index 783149ee4..02bb84324 100644 --- a/apis/pkg/v1beta1/function_interfaces_test.go +++ b/apis/pkg/v1beta1/function_interfaces_test.go @@ -18,6 +18,8 @@ package v1beta1 import v1 "github.com/crossplane/crossplane/apis/pkg/v1" -var _ v1.Package = &Function{} -var _ v1.PackageRevision = &FunctionRevision{} -var _ v1.PackageRevisionList = &FunctionRevisionList{} +var ( + _ v1.Package = &Function{} + _ v1.PackageRevision = &FunctionRevision{} + _ v1.PackageRevisionList = &FunctionRevisionList{} +) diff --git a/apis/pkg/v1beta1/lock.go b/apis/pkg/v1beta1/lock.go index 195fd0273..955d765a8 100644 --- a/apis/pkg/v1beta1/lock.go +++ b/apis/pkg/v1beta1/lock.go @@ -22,8 +22,10 @@ import ( "github.com/crossplane/crossplane/internal/dag" ) -var _ dag.Node = &Dependency{} -var _ dag.Node = &LockPackage{} +var ( + _ dag.Node = &Dependency{} + _ dag.Node = &LockPackage{} +) // A PackageType is a type of package. type PackageType string diff --git a/apis/pkg/v1beta1/register.go b/apis/pkg/v1beta1/register.go index a89fe8936..9c70db5f9 100644 --- a/apis/pkg/v1beta1/register.go +++ b/apis/pkg/v1beta1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. 
SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to the scheme + // AddToScheme adds all registered types to the scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/secrets/secrets.go b/apis/secrets/secrets.go index c2089d76d..66e59c595 100644 --- a/apis/secrets/secrets.go +++ b/apis/secrets/secrets.go @@ -30,10 +30,10 @@ func init() { ) } -// AddToSchemes may be used to add all resources defined in the project to a Scheme +// AddToSchemes may be used to add all resources defined in the project to a Scheme. var AddToSchemes runtime.SchemeBuilder -// AddToScheme adds all Resources to the Scheme +// AddToScheme adds all Resources to the Scheme. func AddToScheme(s *runtime.Scheme) error { return AddToSchemes.AddToScheme(s) } diff --git a/apis/secrets/v1alpha1/register.go b/apis/secrets/v1alpha1/register.go index 96259d67f..8bb63b194 100644 --- a/apis/secrets/v1alpha1/register.go +++ b/apis/secrets/v1alpha1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/secrets/v1alpha1/storeconfig_types.go b/apis/secrets/v1alpha1/storeconfig_types.go index 12442760c..ba364c1d4 100644 --- a/apis/secrets/v1alpha1/storeconfig_types.go +++ b/apis/secrets/v1alpha1/storeconfig_types.go @@ -43,14 +43,14 @@ type StoreConfig struct { // +kubebuilder:object:root=true -// StoreConfigList contains a list of StoreConfig +// StoreConfigList contains a list of StoreConfig. type StoreConfigList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []StoreConfig `json:"items"` } -// GetStoreConfig returns SecretStoreConfig +// GetStoreConfig returns SecretStoreConfig. func (in *StoreConfig) GetStoreConfig() xpv1.SecretStoreConfig { return in.Spec.SecretStoreConfig } diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index 35df695b2..e21337683 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -1471,7 +1471,7 @@ spec: items: description: |- ReadinessCheck is used to indicate how to tell whether a resource is ready - for consumption + for consumption. properties: fieldPath: description: FieldPath shows the path of the field whose @@ -3047,7 +3047,7 @@ spec: items: description: |- ReadinessCheck is used to indicate how to tell whether a resource is ready - for consumption + for consumption. 
properties: fieldPath: description: FieldPath shows the path of the field whose diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index 0b6bb2872..4f0b43972 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -1464,7 +1464,7 @@ spec: items: description: |- ReadinessCheck is used to indicate how to tell whether a resource is ready - for consumption + for consumption. properties: fieldPath: description: FieldPath shows the path of the field whose diff --git a/cmd/crank/beta/convert/deploymentruntime/cmd.go b/cmd/crank/beta/convert/deploymentruntime/cmd.go index 4c3bf4a5a..f1ada34f7 100644 --- a/cmd/crank/beta/convert/deploymentruntime/cmd.go +++ b/cmd/crank/beta/convert/deploymentruntime/cmd.go @@ -34,10 +34,10 @@ import ( // Cmd arguments and flags for convert deployment-runtime subcommand. type Cmd struct { // Arguments. - InputFile string `arg:"" type:"path" optional:"" default:"-" help:"The ControllerConfig file to be Converted. If not specified or '-', stdin will be used."` + InputFile string `arg:"" default:"-" help:"The ControllerConfig file to be Converted. If not specified or '-', stdin will be used." optional:"" type:"path"` // Flags. - OutputFile string `short:"o" type:"path" placeholder:"PATH" help:"The file to write the generated DeploymentRuntimeConfig to. If not specified, stdout will be used."` + OutputFile string `help:"The file to write the generated DeploymentRuntimeConfig to. If not specified, stdout will be used." placeholder:"PATH" short:"o" type:"path"` fs afero.Fs } diff --git a/cmd/crank/beta/convert/deploymentruntime/converter.go b/cmd/crank/beta/convert/deploymentruntime/converter.go index bc3a35931..19f7fcf99 100644 --- a/cmd/crank/beta/convert/deploymentruntime/converter.go +++ b/cmd/crank/beta/convert/deploymentruntime/converter.go @@ -30,7 +30,7 @@ import ( ) const ( - // default container name that XP uses + // default container name that XP uses. runtimeContainerName = "package-runtime" errNilControllerConfig = "ControllerConfig is nil" @@ -39,7 +39,7 @@ const ( var timeNow = time.Now() // controllerConfigToDeploymentRuntimeConfig converts a ControllerConfig to -// a DeploymentRuntimeConfig +// a DeploymentRuntimeConfig. func controllerConfigToDeploymentRuntimeConfig(cc *v1alpha1.ControllerConfig) (*v1beta1.DeploymentRuntimeConfig, error) { if cc == nil { return nil, errors.New(errNilControllerConfig) @@ -165,8 +165,7 @@ func containerFromControllerConfig(cc *v1alpha1.ControllerConfig) *corev1.Contai c.Env = append(c.Env, cc.Spec.Env...) } if len(cc.Spec.VolumeMounts) > 0 { - c.VolumeMounts = - append(c.VolumeMounts, cc.Spec.VolumeMounts...) + c.VolumeMounts = append(c.VolumeMounts, cc.Spec.VolumeMounts...) } if cc.Spec.ResourceRequirements != nil { c.Resources = *cc.Spec.ResourceRequirements.DeepCopy() @@ -235,7 +234,7 @@ func withDeploymentTemplate(dt *v1beta1.DeploymentTemplate) func(*v1beta1.Deploy } // shouldCreateDeploymentTemplate determines whether we should create a deployment -// template in the DeploymentRuntimeConfig +// template in the DeploymentRuntimeConfig. 
func shouldCreateDeploymentTemplate(cc *v1alpha1.ControllerConfig) bool { //nolint:gocyclo // There are a lot of triggers for this, but it's not complex return len(cc.Labels) > 0 || len(cc.Annotations) > 0 || @@ -255,7 +254,7 @@ func shouldCreateDeploymentTemplate(cc *v1alpha1.ControllerConfig) bool { //noli } // shouldCreateDeploymentTemplateContainer determines whether we should create a container -// entry in the DeploymentRuntimeConfig +// entry in the DeploymentRuntimeConfig. func shouldCreateDeploymentTemplateContainer(cc *v1alpha1.ControllerConfig) bool { return cc.Spec.Image != nil || cc.Spec.ImagePullPolicy != nil || diff --git a/cmd/crank/beta/convert/deploymentruntime/converter_test.go b/cmd/crank/beta/convert/deploymentruntime/converter_test.go index af85ebde7..268db1701 100644 --- a/cmd/crank/beta/convert/deploymentruntime/converter_test.go +++ b/cmd/crank/beta/convert/deploymentruntime/converter_test.go @@ -79,12 +79,13 @@ func TestNewDeploymentTemplateFromControllerConfig(t *testing.T) { Affinity: &corev1.Affinity{ NodeAffinity: &corev1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{{ - MatchFields: []corev1.NodeSelectorRequirement{ - {Key: "xplane"}, + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchFields: []corev1.NodeSelectorRequirement{ + {Key: "xplane"}, + }, }, }, - }, }, }, }, @@ -133,31 +134,33 @@ func TestNewDeploymentTemplateFromControllerConfig(t *testing.T) { Affinity: &corev1.Affinity{ NodeAffinity: &corev1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{{ - MatchFields: []corev1.NodeSelectorRequirement{ - {Key: "xplane"}, + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchFields: []corev1.NodeSelectorRequirement{ + {Key: "xplane"}, + }, }, }, - }, }, }, }, - Containers: []corev1.Container{{ - Name: "package-runtime", - Args: []string{"- -d", "- --enable-management-policies"}, - Image: image, - Resources: corev1.ResourceRequirements{ - Limits: map[corev1.ResourceName]resource.Quantity{ - "cpu": *resource.NewMilliQuantity(5000, resource.DecimalSI), - "memory": *resource.NewQuantity(10*1024*1024*1024, resource.BinarySI), - }, - Requests: map[corev1.ResourceName]resource.Quantity{ - "cpu": *resource.NewMilliQuantity(1500, resource.DecimalSI), - "memory": *resource.NewQuantity(5*1024*1024*1024, resource.BinarySI), + Containers: []corev1.Container{ + { + Name: "package-runtime", + Args: []string{"- -d", "- --enable-management-policies"}, + Image: image, + Resources: corev1.ResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + "cpu": *resource.NewMilliQuantity(5000, resource.DecimalSI), + "memory": *resource.NewQuantity(10*1024*1024*1024, resource.BinarySI), + }, + Requests: map[corev1.ResourceName]resource.Quantity{ + "cpu": *resource.NewMilliQuantity(1500, resource.DecimalSI), + "memory": *resource.NewQuantity(5*1024*1024*1024, resource.BinarySI), + }, }, + VolumeMounts: []corev1.VolumeMount{{Name: "mount1", MountPath: "/tmp"}, {Name: "mount2", MountPath: "/etc/ssl/certs"}}, }, - VolumeMounts: []corev1.VolumeMount{{Name: "mount1", MountPath: "/tmp"}, {Name: "mount2", MountPath: "/etc/ssl/certs"}}, - }, }, ImagePullSecrets: []corev1.LocalObjectReference{{Name: "my-secret"}}, @@ -284,7 +287,8 @@ func TestControllerConfigToRuntimeDeploymentConfig(t *testing.T) { Labels: map[string]string{}, CreationTimestamp: timeNow, }, - }}, + }, + }, }, }, }, @@ -323,7 +327,6 @@ 
func TestNewContainerFromControllerConfig(t *testing.T) { args args want want }{ - "NilControllerConfig": { reason: "Correctly return an empty container", args: args{ @@ -378,7 +381,6 @@ func TestNewContainerFromControllerConfig(t *testing.T) { if diff := cmp.Diff(tc.want.c, c, cmpopts.EquateApproxTime(time.Second*2)); diff != "" { t.Errorf("%s\ncontainerFromControllerConfig(...): -want i, +got i:\n%s", tc.reason, diff) } - }) } } diff --git a/cmd/crank/beta/convert/io/io.go b/cmd/crank/beta/convert/io/io.go index 0a3ba55dc..22ae64818 100644 --- a/cmd/crank/beta/convert/io/io.go +++ b/cmd/crank/beta/convert/io/io.go @@ -54,7 +54,7 @@ func WriteObjectYAML(fs afero.Fs, outputFile string, o runtime.Object) error { var output io.Writer if outputFile != "" { - f, err := fs.OpenFile(outputFile, os.O_CREATE|os.O_WRONLY, 0644) + f, err := fs.OpenFile(outputFile, os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { return errors.Wrap(err, "Unable to open output file") } diff --git a/cmd/crank/beta/convert/pipelinecomposition/cmd.go b/cmd/crank/beta/convert/pipelinecomposition/cmd.go index 236d43a40..7dd91a101 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/cmd.go +++ b/cmd/crank/beta/convert/pipelinecomposition/cmd.go @@ -32,11 +32,11 @@ import ( // Cmd arguments and flags for converting a patch-and-transform to a function pipeline composition. type Cmd struct { // Arguments. - InputFile string `arg:"" type:"path" optional:"" default:"-" help:"The Composition file to be converted. If not specified or '-', stdin will be used."` + InputFile string `arg:"" default:"-" help:"The Composition file to be converted. If not specified or '-', stdin will be used." optional:"" type:"path"` // Flags. - OutputFile string `short:"o" type:"path" placeholder:"PATH" help:"The file to write the generated Composition to. If not specified, stdout will be used."` - FunctionName string `short:"f" type:"string" placeholder:"STRING" help:"FunctionRefName. Defaults to function-patch-and-transform."` + OutputFile string `help:"The file to write the generated Composition to. If not specified, stdout will be used." placeholder:"PATH" short:"o" type:"path"` + FunctionName string `help:"FunctionRefName. Defaults to function-patch-and-transform." placeholder:"STRING" short:"f" type:"string"` fs afero.Fs } diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter.go b/cmd/crank/beta/convert/pipelinecomposition/converter.go index 8e39413ab..a047de5a5 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter.go @@ -93,7 +93,7 @@ func convertPnTToPipeline(c *v1.Composition, functionRefName string) (*v1.Compos } // Override function name if provided - var fr = v1.FunctionReference{Name: defaultFunctionRefName} + fr := v1.FunctionReference{Name: defaultFunctionRefName} if functionRefName != "" { fr.Name = functionRefName } @@ -113,9 +113,9 @@ func convertPnTToPipeline(c *v1.Composition, functionRefName string) (*v1.Compos } // processFunctionInput populates any missing fields in the input to the function -// that are required by the function but were optional in the built-in engine +// that are required by the function but were optional in the built-in engine. 
func processFunctionInput(input *Input) *runtime.RawExtension { - var processedInput = &Input{} + processedInput := &Input{} // process Environment Patches if input.Environment != nil && len(input.Environment.Patches) > 0 { @@ -143,7 +143,7 @@ func processFunctionInput(input *Input) *runtime.RawExtension { processedInput.Resources = processedResources // Wrap the input in a RawExtension - var inputType = map[string]any{ + inputType := map[string]any{ "apiVersion": "pt.fn.crossplane.io/v1beta1", "kind": "Resources", "environment": processedInput.Environment.DeepCopy(), @@ -215,7 +215,7 @@ func setMissingResourceFields(idx int, rs v1.ComposedTemplate) v1.ComposedTempla } // setTransformTypeRequiredFields sets fields that are required with -// function-patch-and-transform but were optional with the built-in engine +// function-patch-and-transform but were optional with the built-in engine. func setTransformTypeRequiredFields(tt v1.Transform) v1.Transform { if tt.Type == "" { if tt.Math != nil { diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go index 37629f34a..4ebdae0f2 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go @@ -105,7 +105,6 @@ func TestSetMissingConnectionDetailFields(t *testing.T) { if diff := cmp.Diff(tc.want.sk, sk); diff != "" { t.Errorf("%s\nsetMissingConnectionDetailFields(...): -want i, +got i:\n%s", tc.reason, diff) } - }) } } @@ -467,7 +466,6 @@ func TestSetTransformTypeRequiredFields(t *testing.T) { if diff := cmp.Diff(tc.want.tt, tt); diff != "" { t.Errorf("%s\nsetTransformTypeRequiredFields(...): -want i, +got i:\n%s", tc.reason, diff) } - }) } } @@ -692,7 +690,6 @@ func TestSetMissingPatchSetFields(t *testing.T) { if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("%s\nsetMissingPatchSetFields(...): -want i, +got i:\n%s", tc.reason, diff) } - }) } } @@ -721,7 +718,8 @@ func TestSetMissingEnvironmentPatchFields(t *testing.T) { Type: v1.PatchTypeCombineFromComposite, FromFieldPath: &fieldPath, ToFieldPath: &fieldPath, - }}, + }, + }, "TransformArrayMissingFields": { reason: "Nested missing Types are filled in for a transform array", args: args{ @@ -776,7 +774,8 @@ func TestSetMissingEnvironmentPatchFields(t *testing.T) { Type: v1.PatchTypeFromCompositeFieldPath, FromFieldPath: &fieldPath, ToFieldPath: &fieldPath, - }}, + }, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { @@ -812,7 +811,8 @@ func TestSetMissingPatchFields(t *testing.T) { Type: v1.PatchTypeCombineFromComposite, FromFieldPath: &fieldPath, ToFieldPath: &fieldPath, - }}, + }, + }, "TransformArrayMissingFields": { reason: "Nested missing Types are filled in for a transform array", args: args{ @@ -867,7 +867,8 @@ func TestSetMissingPatchFields(t *testing.T) { Type: v1.PatchTypeFromCompositeFieldPath, FromFieldPath: &fieldPath, ToFieldPath: &fieldPath, - }}, + }, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { @@ -884,7 +885,7 @@ func TestSetMissingResourceFields(t *testing.T) { empty := "" str := "crossplane" fcsk := v1.ConnectionDetailTypeFromConnectionSecretKey - var baseNoName = map[string]any{ + baseNoName := map[string]any{ "apiVersion": "nop.crossplane.io/v1", "kind": "TestResource", "spec": map[string]any{}, diff --git a/cmd/crank/beta/render/cmd.go b/cmd/crank/beta/render/cmd.go index 05c19144f..7baf1eda0 100644 --- a/cmd/crank/beta/render/cmd.go +++ 
b/cmd/crank/beta/render/cmd.go @@ -39,20 +39,20 @@ import ( // Cmd arguments and flags for render subcommand. type Cmd struct { // Arguments. - CompositeResource string `arg:"" type:"existingfile" help:"A YAML file specifying the composite resource (XR) to render."` - Composition string `arg:"" type:"existingfile" help:"A YAML file specifying the Composition to use to render the XR. Must be mode: Pipeline."` - Functions string `arg:"" type:"path" help:"A YAML file or directory of YAML files specifying the Composition Functions to use to render the XR."` + CompositeResource string `arg:"" help:"A YAML file specifying the composite resource (XR) to render." type:"existingfile"` + Composition string `arg:"" help:"A YAML file specifying the Composition to use to render the XR. Must be mode: Pipeline." type:"existingfile"` + Functions string `arg:"" help:"A YAML file or directory of YAML files specifying the Composition Functions to use to render the XR." type:"path"` // Flags. Keep them in alphabetical order. - ContextFiles map[string]string `mapsep:"," help:"Comma-separated context key-value pairs to pass to the Function pipeline. Values must be files containing JSON."` - ContextValues map[string]string `mapsep:"," help:"Comma-separated context key-value pairs to pass to the Function pipeline. Values must be JSON. Keys take precedence over --context-files."` - IncludeFunctionResults bool `short:"r" help:"Include informational and warning messages from Functions in the rendered output as resources of kind: Result."` - IncludeFullXR bool `short:"x" help:"Include a direct copy of the input XR's spec and metadata fields in the rendered output."` - ObservedResources string `short:"o" placeholder:"PATH" type:"path" help:"A YAML file or directory of YAML files specifying the observed state of composed resources."` - ExtraResources string `short:"e" placeholder:"PATH" type:"path" help:"A YAML file or directory of YAML files specifying extra resources to pass to the Function pipeline."` - IncludeContext bool `short:"c" help:"Include the context in the rendered output as a resource of kind: Context."` - - Timeout time.Duration `help:"How long to run before timing out." default:"1m"` + ContextFiles map[string]string `help:"Comma-separated context key-value pairs to pass to the Function pipeline. Values must be files containing JSON." mapsep:""` + ContextValues map[string]string `help:"Comma-separated context key-value pairs to pass to the Function pipeline. Values must be JSON. Keys take precedence over --context-files." mapsep:""` + IncludeFunctionResults bool `help:"Include informational and warning messages from Functions in the rendered output as resources of kind: Result." short:"r"` + IncludeFullXR bool `help:"Include a direct copy of the input XR's spec and metadata fields in the rendered output." short:"x"` + ObservedResources string `help:"A YAML file or directory of YAML files specifying the observed state of composed resources." placeholder:"PATH" short:"o" type:"path"` + ExtraResources string `help:"A YAML file or directory of YAML files specifying extra resources to pass to the Function pipeline." placeholder:"PATH" short:"e" type:"path"` + IncludeContext bool `help:"Include the context in the rendered output as a resource of kind: Context." 
short:"c"` + + Timeout time.Duration `default:"1m" help:"How long to run before timing out."` fs afero.Fs } diff --git a/cmd/crank/beta/render/load_test.go b/cmd/crank/beta/render/load_test.go index f64cf7b22..d37e61437 100644 --- a/cmd/crank/beta/render/load_test.go +++ b/cmd/crank/beta/render/load_test.go @@ -37,10 +37,8 @@ import ( pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) -var ( - //go:embed testdata - testdatafs embed.FS -) +//go:embed testdata +var testdatafs embed.FS func TestLoadCompositeResource(t *testing.T) { fs := afero.FromIOFS{FS: testdatafs} @@ -411,16 +409,17 @@ func TestLoadYAMLStream(t *testing.T) { "Success": { args: args{ file: "testdata/observed.yaml", - fs: afero.FromIOFS{FS: fstest.MapFS{ - "testdata/observed.yaml": &fstest.MapFile{ - Data: []byte(`--- + fs: afero.FromIOFS{ + FS: fstest.MapFS{ + "testdata/observed.yaml": &fstest.MapFile{ + Data: []byte(`--- test: "test" --- test: "test2" `), + }, }, }, - }, }, want: want{ out: [][]byte{ diff --git a/cmd/crank/beta/render/render_test.go b/cmd/crank/beta/render/render_test.go index 499da3107..ecd695e00 100644 --- a/cmd/crank/beta/render/render_test.go +++ b/cmd/crank/beta/render/render_test.go @@ -151,7 +151,6 @@ func TestRender(t *testing.T) { }, Functions: []pkgv1beta1.Function{ func() pkgv1beta1.Function { - lis := NewFunction(t, &fnv1beta1.RunFunctionResponse{ Results: []*fnv1beta1.Result{ { @@ -206,7 +205,6 @@ func TestRender(t *testing.T) { }, Functions: []pkgv1beta1.Function{ func() pkgv1beta1.Function { - lis := NewFunction(t, &fnv1beta1.RunFunctionResponse{ Desired: &fnv1beta1.State{ Composite: &fnv1beta1.Resource{ @@ -375,7 +373,6 @@ func TestRender(t *testing.T) { }, Functions: []pkgv1beta1.Function{ func() pkgv1beta1.Function { - lis := NewFunction(t, &fnv1beta1.RunFunctionResponse{ Desired: &fnv1beta1.State{ Composite: &fnv1beta1.Resource{ @@ -739,7 +736,6 @@ func TestRender(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - out, err := Render(tc.args.ctx, logging.NewNopLogger(), tc.args.in) if diff := cmp.Diff(tc.want.out, out, cmpopts.EquateEmpty()); diff != "" { diff --git a/cmd/crank/beta/top/top.go b/cmd/crank/beta/top/top.go index 12ff6dc3e..bef14f993 100644 --- a/cmd/crank/beta/top/top.go +++ b/cmd/crank/beta/top/top.go @@ -51,8 +51,8 @@ const ( // Cmd represents the top command. type Cmd struct { - Summary bool `short:"s" name:"summary" help:"Adds summary header for all Crossplane pods."` - Namespace string `short:"n" name:"namespace" help:"Show pods from a specific namespace, defaults to crossplane-system." default:"crossplane-system"` + Summary bool `help:"Adds summary header for all Crossplane pods." name:"summary" short:"s"` + Namespace string `default:"crossplane-system" help:"Show pods from a specific namespace, defaults to crossplane-system." name:"namespace" short:"n"` } // Help returns help instructions for the top command. 
@@ -130,7 +130,6 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc ctx := context.Background() pods, err := k8sClientset.CoreV1().Pods(c.Namespace).List(ctx, metav1.ListOptions{}) - if err != nil { return errors.Wrap(err, errFetchAllPods) } diff --git a/cmd/crank/beta/top/top_test.go b/cmd/crank/beta/top/top_test.go index 368668a2d..77072f664 100644 --- a/cmd/crank/beta/top/top_test.go +++ b/cmd/crank/beta/top/top_test.go @@ -94,7 +94,6 @@ func TestGetCrossplanePods(t *testing.T) { }, { ObjectMeta: metav1.ObjectMeta{ - Name: "provider-azure-storage", Namespace: "crossplane-system", Labels: map[string]string{ @@ -154,6 +153,7 @@ func TestGetCrossplanePods(t *testing.T) { }) } } + func TestPrintPodsTable(t *testing.T) { type want struct { results string @@ -236,6 +236,7 @@ function crossplane-system function-123 200m 1024Mi }) } } + func TestPrintPodsSummary(t *testing.T) { type want struct { results string @@ -297,7 +298,6 @@ CPU(cores): 900000m } }) } - } func TestCapitalizeFirst(t *testing.T) { diff --git a/cmd/crank/beta/trace/internal/printer/default.go b/cmd/crank/beta/trace/internal/printer/default.go index 7cd75eaee..17322e9e4 100644 --- a/cmd/crank/beta/trace/internal/printer/default.go +++ b/cmd/crank/beta/trace/internal/printer/default.go @@ -42,7 +42,7 @@ const ( errFlushTabWriter = "cannot flush tab writer" ) -// DefaultPrinter defines the DefaultPrinter configuration +// DefaultPrinter defines the DefaultPrinter configuration. type DefaultPrinter struct { wide bool } @@ -129,7 +129,6 @@ func getHeaders(gk schema.GroupKind, wide bool) (headers fmt.Stringer, isPackage ready: "READY", status: "STATUS", }, false - } // Print implements the Printer interface by prints the resource tree in a diff --git a/cmd/crank/beta/trace/internal/printer/default_test.go b/cmd/crank/beta/trace/internal/printer/default_test.go index 3ea26e686..1239741c6 100644 --- a/cmd/crank/beta/trace/internal/printer/default_test.go +++ b/cmd/crank/beta/trace/internal/printer/default_test.go @@ -151,5 +151,4 @@ Configuration/platform-ref-aws } }) } - } diff --git a/cmd/crank/beta/trace/internal/printer/dot.go b/cmd/crank/beta/trace/internal/printer/dot.go index a68db1b58..ba27ce1d2 100644 --- a/cmd/crank/beta/trace/internal/printer/dot.go +++ b/cmd/crank/beta/trace/internal/printer/dot.go @@ -16,9 +16,8 @@ import ( "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource/xpkg" ) -// DotPrinter defines the DotPrinter configuration -type DotPrinter struct { -} +// DotPrinter defines the DotPrinter configuration. +type DotPrinter struct{} var _ Printer = &DotPrinter{} diff --git a/cmd/crank/beta/trace/internal/printer/dot_test.go b/cmd/crank/beta/trace/internal/printer/dot_test.go index 8743707b9..e1cfab84c 100644 --- a/cmd/crank/beta/trace/internal/printer/dot_test.go +++ b/cmd/crank/beta/trace/internal/printer/dot_test.go @@ -11,7 +11,7 @@ import ( "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" ) -// Define a test for PrintDotGraph +// Define a test for PrintDotGraph. 
func TestPrintDotGraph(t *testing.T) { type args struct { resource *resource.Resource @@ -103,8 +103,6 @@ func TestPrintDotGraph(t *testing.T) { if diff := cmp.Diff(tc.want.dotString, got); diff != "" { t.Errorf("%s\nDotPrinter.Print(): -want, +got:\n%s", tc.reason, diff) } - }) - } } diff --git a/cmd/crank/beta/trace/internal/printer/json.go b/cmd/crank/beta/trace/internal/printer/json.go index 7089816fa..30cbabea9 100644 --- a/cmd/crank/beta/trace/internal/printer/json.go +++ b/cmd/crank/beta/trace/internal/printer/json.go @@ -31,8 +31,7 @@ const ( ) // JSONPrinter is a printer that prints the resource graph as JSON. -type JSONPrinter struct { -} +type JSONPrinter struct{} var _ Printer = &JSONPrinter{} diff --git a/cmd/crank/beta/trace/internal/printer/json_test.go b/cmd/crank/beta/trace/internal/printer/json_test.go index 6a6f1d9fb..c4e52ff4a 100644 --- a/cmd/crank/beta/trace/internal/printer/json_test.go +++ b/cmd/crank/beta/trace/internal/printer/json_test.go @@ -305,5 +305,4 @@ func TestJSONPrinter(t *testing.T) { } }) } - } diff --git a/cmd/crank/beta/trace/internal/printer/printer.go b/cmd/crank/beta/trace/internal/printer/printer.go index 597bc32bc..67b2143a6 100644 --- a/cmd/crank/beta/trace/internal/printer/printer.go +++ b/cmd/crank/beta/trace/internal/printer/printer.go @@ -33,7 +33,7 @@ const ( // Type represents the type of printer. type Type string -// Implemented PrinterTypes +// Implemented PrinterTypes. const ( TypeDefault Type = "default" TypeWide Type = "wide" diff --git a/cmd/crank/beta/trace/internal/resource/client.go b/cmd/crank/beta/trace/internal/resource/client.go index e5a910561..3e292023e 100644 --- a/cmd/crank/beta/trace/internal/resource/client.go +++ b/cmd/crank/beta/trace/internal/resource/client.go @@ -85,7 +85,6 @@ func GetResource(ctx context.Context, client client.Client, ref *v1.ObjectRefere result.SetGroupVersionKind(ref.GroupVersionKind()) err := client.Get(ctx, xpmeta.NamespacedNameOf(ref), &result) - if err != nil { // If the resource is not found, we still want to return a Resource // object with the name and namespace set, so that the caller can diff --git a/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go b/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go index e697e9ee0..f710e276a 100644 --- a/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go +++ b/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go @@ -24,7 +24,7 @@ import ( ) // TODO add more cases, fake client -// Consider testing getPackageDeps instead to cover more +// Consider testing getPackageDeps instead to cover more. func TestGetDependencyRef(t *testing.T) { type args struct { pkgType v1beta1.PackageType diff --git a/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go b/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go index a8b2b1b54..6a9e3609a 100644 --- a/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go +++ b/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/crossplane/crossplane/apis/pkg/v1" + v1 "github.com/crossplane/crossplane/apis/pkg/v1" "github.com/crossplane/crossplane/apis/pkg/v1alpha1" "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) diff --git a/cmd/crank/beta/trace/trace.go b/cmd/crank/beta/trace/trace.go index f525e87ed..6ad668ae2 100644 --- a/cmd/crank/beta/trace/trace.go +++ b/cmd/crank/beta/trace/trace.go @@ -58,16 +58,16 @@ const ( // Cmd builds the trace tree for a Crossplane resource. 
type Cmd struct { Resource string `arg:"" help:"Kind of the Crossplane resource, accepts the 'TYPE[.VERSION][.GROUP][/NAME]' format."` - Name string `arg:"" optional:"" help:"Name of the Crossplane resource, can be passed as part of the resource too."` + Name string `arg:"" help:"Name of the Crossplane resource, can be passed as part of the resource too." optional:""` // TODO(phisco): add support for all the usual kubectl flags; configFlags := genericclioptions.NewConfigFlags(true).AddFlags(...) // TODO(phisco): move to namespace defaulting to "" and use the current context's namespace - Namespace string `short:"n" name:"namespace" help:"Namespace of the resource." default:"default"` - Output string `short:"o" name:"output" help:"Output format. One of: default, wide, json, dot." enum:"default,wide,json,dot" default:"default"` - ShowConnectionSecrets bool `short:"s" name:"show-connection-secrets" help:"Show connection secrets in the output."` - ShowPackageDependencies string `name:"show-package-dependencies" help:"Show package dependencies in the output. One of: unique, all, none." enum:"unique,all,none" default:"unique"` - ShowPackageRevisions string `name:"show-package-revisions" help:"Show package revisions in the output. One of: active, all, none." enum:"active,all,none" default:"active"` - ShowPackageRuntimeConfigs bool `name:"show-package-runtime-configs" help:"Show package runtime configs in the output." default:"false"` + Namespace string `default:"default" help:"Namespace of the resource." name:"namespace" short:"n"` + Output string `default:"default" enum:"default,wide,json,dot" help:"Output format. One of: default, wide, json, dot." name:"output" short:"o"` + ShowConnectionSecrets bool `help:"Show connection secrets in the output." name:"show-connection-secrets" short:"s"` + ShowPackageDependencies string `default:"unique" enum:"unique,all,none" help:"Show package dependencies in the output. One of: unique, all, none." name:"show-package-dependencies"` + ShowPackageRevisions string `default:"active" enum:"active,all,none" help:"Show package revisions in the output. One of: active, all, none." name:"show-package-revisions"` + ShowPackageRuntimeConfigs bool `default:"false" help:"Show package runtime configs in the output." name:"show-package-runtime-configs"` } // Help returns help message for the trace command. diff --git a/cmd/crank/beta/validate/cache.go b/cmd/crank/beta/validate/cache.go index eda26ff84..2a19b035e 100644 --- a/cmd/crank/beta/validate/cache.go +++ b/cmd/crank/beta/validate/cache.go @@ -27,7 +27,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" ) -// Cache defines an interface for caching schemas +// Cache defines an interface for caching schemas. type Cache interface { Store([][]byte, string) error Flush() error @@ -36,13 +36,13 @@ type Cache interface { Exists(string) (string, error) } -// LocalCache implements the Cache interface +// LocalCache implements the Cache interface. type LocalCache struct { fs afero.Fs cacheDir string } -// Store stores the schemas in the directory +// Store stores the schemas in the directory. func (c *LocalCache) Store(schemas [][]byte, path string) error { if err := c.fs.MkdirAll(path, os.ModePerm); err != nil { return errors.Wrapf(err, "cannot create directory %s", path) @@ -68,7 +68,7 @@ func (c *LocalCache) Store(schemas [][]byte, path string) error { return nil } -// Init creates the cache directory if it doesn't exist +// Init creates the cache directory if it doesn't exist. 
func (c *LocalCache) Init() error { if _, err := c.fs.Stat(c.cacheDir); os.IsNotExist(err) { if err := c.fs.MkdirAll(c.cacheDir, os.ModePerm); err != nil { @@ -81,12 +81,12 @@ func (c *LocalCache) Init() error { return nil } -// Flush removes the cache directory +// Flush removes the cache directory. func (c *LocalCache) Flush() error { return c.fs.RemoveAll(c.cacheDir) } -// Load loads the schemas from the cache directory +// Load loads the schemas from the cache directory. func (c *LocalCache) Load() ([]*unstructured.Unstructured, error) { loader, err := NewLoader(c.cacheDir) if err != nil { @@ -101,7 +101,7 @@ func (c *LocalCache) Load() ([]*unstructured.Unstructured, error) { return schemas, nil } -// Exists checks if the cache contains the image and returns the path if it doesn't exist +// Exists checks if the cache contains the image and returns the path if it doesn't exist. func (c *LocalCache) Exists(image string) (string, error) { fName := strings.ReplaceAll(image, ":", "@") path := filepath.Join(c.cacheDir, fName) diff --git a/cmd/crank/beta/validate/cmd.go b/cmd/crank/beta/validate/cmd.go index eb7960c6f..161d1866b 100644 --- a/cmd/crank/beta/validate/cmd.go +++ b/cmd/crank/beta/validate/cmd.go @@ -35,7 +35,7 @@ type Cmd struct { Resources string `arg:"" help:"Resources source which can be a file, directory, or '-' for standard input."` // Flags. Keep them in alphabetical order. - CacheDir string `help:"Absolute path to the cache directory where downloaded schemas are stored." default:".crossplane/cache"` + CacheDir string `default:".crossplane/cache" help:"Absolute path to the cache directory where downloaded schemas are stored."` CleanCache bool `help:"Clean the cache directory before downloading package schemas."` SkipSuccessResults bool `help:"Skip printing success results."` diff --git a/cmd/crank/beta/validate/image.go b/cmd/crank/beta/validate/image.go index 0c8fc2cf4..a308bb426 100644 --- a/cmd/crank/beta/validate/image.go +++ b/cmd/crank/beta/validate/image.go @@ -27,15 +27,15 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" ) -// ImageFetcher defines an interface for fetching images +// ImageFetcher defines an interface for fetching images. type ImageFetcher interface { FetchBaseLayer(image string) (*conregv1.Layer, error) } -// Fetcher implements the ImageFetcher interface +// Fetcher implements the ImageFetcher interface. type Fetcher struct{} -// FetchBaseLayer fetches the base layer of the image which contains the 'package.yaml' file +// FetchBaseLayer fetches the base layer of the image which contains the 'package.yaml' file. func (f *Fetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { if strings.Contains(image, "sha") { // Strip the digest before fetching the image image = strings.Split(image, "@")[0] diff --git a/cmd/crank/beta/validate/loader.go b/cmd/crank/beta/validate/loader.go index 96741da8a..6b610779b 100644 --- a/cmd/crank/beta/validate/loader.go +++ b/cmd/crank/beta/validate/loader.go @@ -28,12 +28,12 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" ) -// Loader interface defines the contract for different input sources +// Loader interface defines the contract for different input sources. type Loader interface { Load() ([]*unstructured.Unstructured, error) } -// NewLoader returns a Loader based on the input source +// NewLoader returns a Loader based on the input source. 
func NewLoader(input string) (Loader, error) { if input == "-" { return &StdinLoader{}, nil @@ -51,10 +51,10 @@ func NewLoader(input string) (Loader, error) { return &FileLoader{path: input}, nil } -// StdinLoader implements the Loader interface for reading from stdin +// StdinLoader implements the Loader interface for reading from stdin. type StdinLoader struct{} -// Load reads the contents from stdin +// Load reads the contents from stdin. func (s *StdinLoader) Load() ([]*unstructured.Unstructured, error) { stream, err := load(os.Stdin) if err != nil { @@ -64,12 +64,12 @@ func (s *StdinLoader) Load() ([]*unstructured.Unstructured, error) { return streamToUnstructured(stream) } -// FileLoader implements the Loader interface for reading from a file and converting input to unstructured objects +// FileLoader implements the Loader interface for reading from a file and converting input to unstructured objects. type FileLoader struct { path string } -// Load reads the contents from a file +// Load reads the contents from a file. func (f *FileLoader) Load() ([]*unstructured.Unstructured, error) { stream, err := readFile(f.path) if err != nil { @@ -79,12 +79,12 @@ func (f *FileLoader) Load() ([]*unstructured.Unstructured, error) { return streamToUnstructured(stream) } -// FolderLoader implements the Loader interface for reading from a folder +// FolderLoader implements the Loader interface for reading from a folder. type FolderLoader struct { path string } -// Load reads the contents from all files in a folder +// Load reads the contents from all files in a folder. func (f *FolderLoader) Load() ([]*unstructured.Unstructured, error) { var stream [][]byte err := filepath.Walk(f.path, func(path string, info os.FileInfo, err error) error { @@ -114,7 +114,6 @@ func isYamlFile(info os.FileInfo) bool { func readFile(path string) ([][]byte, error) { f, err := os.Open(filepath.Clean(path)) - if err != nil { return nil, errors.Wrap(err, "cannot open file") } diff --git a/cmd/crank/beta/validate/manager.go b/cmd/crank/beta/validate/manager.go index 2bab67ed1..84a2f49c4 100644 --- a/cmd/crank/beta/validate/manager.go +++ b/cmd/crank/beta/validate/manager.go @@ -44,7 +44,7 @@ const ( imageFmt = "%s:%s" ) -// Manager defines a Manager for preparing Crossplane packages for validation +// Manager defines a Manager for preparing Crossplane packages for validation. type Manager struct { fetcher ImageFetcher cache Cache @@ -55,7 +55,7 @@ type Manager struct { confs map[string]bool // Configuration images } -// NewManager returns a new Manager +// NewManager returns a new Manager. func NewManager(cacheDir string, fs afero.Fs, w io.Writer) *Manager { m := &Manager{} @@ -73,7 +73,7 @@ func NewManager(cacheDir string, fs afero.Fs, w io.Writer) *Manager { return m } -// PrepExtensions converts the unstructured XRDs/CRDs to CRDs and extract package images to add as a dependency +// PrepExtensions converts the unstructured XRDs/CRDs to CRDs and extract package images to add as a dependency. func (m *Manager) PrepExtensions(extensions []*unstructured.Unstructured) error { //nolint:gocyclo // the function itself is not that complex, it just has different cases for _, e := range extensions { switch e.GroupVersionKind().GroupKind() { @@ -142,7 +142,7 @@ func (m *Manager) PrepExtensions(extensions []*unstructured.Unstructured) error return nil } -// CacheAndLoad finds and caches dependencies and loads them as CRDs +// CacheAndLoad finds and caches dependencies and loads them as CRDs. 
func (m *Manager) CacheAndLoad(cleanCache bool) error { if cleanCache { if err := m.cache.Flush(); err != nil { diff --git a/cmd/crank/beta/validate/validate.go b/cmd/crank/beta/validate/validate.go index a8e4d63b6..a618a7509 100644 --- a/cmd/crank/beta/validate/validate.go +++ b/cmd/crank/beta/validate/validate.go @@ -90,7 +90,7 @@ func newValidatorsAndStructurals(crds []*extv1.CustomResourceDefinition) (map[ru return validators, structurals, nil } -// SchemaValidation validates the resources against the given CRDs +// SchemaValidation validates the resources against the given CRDs. func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.CustomResourceDefinition, skipSuccessLogs bool, w io.Writer) error { //nolint:gocyclo // printing the output increases the cyclomatic complexity a little bit schemaValidators, structurals, err := newValidatorsAndStructurals(crds) if err != nil { diff --git a/cmd/crank/beta/xpkg/init.go b/cmd/crank/beta/xpkg/init.go index fa609118e..257cf4599 100644 --- a/cmd/crank/beta/xpkg/init.go +++ b/cmd/crank/beta/xpkg/init.go @@ -56,9 +56,9 @@ type initCmd struct { Name string `arg:"" help:"The name of the new package to initialize."` Template string `arg:"" help:"The template name or URL to use to initialize the new package."` - Directory string `short:"d" default:"." type:"path" help:"The directory to initialize. It must be empty. It will be created if it doesn't exist."` - RunInitScript bool `short:"r" name:"run-init-script" help:"Runs the init.sh script if it exists without prompting"` - RefName string `short:"b" name:"ref-name" help:"The branch or tag to clone from the template repository."` + Directory string `default:"." help:"The directory to initialize. It must be empty. It will be created if it doesn't exist." short:"d" type:"path"` + RunInitScript bool `help:"Runs the init.sh script if it exists without prompting" name:"run-init-script" short:"r"` + RefName string `help:"The branch or tag to clone from the template repository." name:"ref-name" short:"b"` } func (c *initCmd) Help() string { @@ -108,7 +108,7 @@ func (c *initCmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:g case err == nil && !f.IsDir(): return errors.Errorf("path %s is not a directory", c.Directory) case os.IsNotExist(err): - if err := os.MkdirAll(c.Directory, 0750); err != nil { + if err := os.MkdirAll(c.Directory, 0o750); err != nil { return errors.Wrapf(err, "failed to create directory %s", c.Directory) } logger.Debug("Created directory", "path", c.Directory) diff --git a/cmd/crank/beta/xpkg/init_test.go b/cmd/crank/beta/xpkg/init_test.go index 032bade05..c905012b6 100644 --- a/cmd/crank/beta/xpkg/init_test.go +++ b/cmd/crank/beta/xpkg/init_test.go @@ -57,7 +57,7 @@ func TestHandleNotes(t *testing.T) { logger := logging.NewNopLogger() dir := t.TempDir() if tc.args.file != "" { - if err := os.WriteFile(filepath.Join(dir, notes), []byte(tc.args.file), 0644); err != nil { + if err := os.WriteFile(filepath.Join(dir, notes), []byte(tc.args.file), 0o644); err != nil { t.Fatalf("writeFile() error = %v", err) } } diff --git a/cmd/crank/main.go b/cmd/crank/main.go index c3aefdb3a..928ac155f 100644 --- a/cmd/crank/main.go +++ b/cmd/crank/main.go @@ -32,8 +32,10 @@ import ( var _ = kong.Must(&cli) -type versionFlag string -type verboseFlag bool +type ( + versionFlag string + verboseFlag bool +) // Decode overrides the default string decoder to be a no-op. 
func (v versionFlag) Decode(_ *kong.DecodeContext) error { return nil } @@ -68,8 +70,8 @@ var cli struct { Beta beta.Cmd `cmd:"" help:"Beta commands."` // Flags. - Verbose verboseFlag `name:"verbose" help:"Print verbose logging statements."` - Version versionFlag `short:"v" name:"version" help:"Print version and quit."` + Verbose verboseFlag `help:"Print verbose logging statements." name:"verbose"` + Version versionFlag `help:"Print version and quit." name:"version" short:"v"` } func main() { diff --git a/cmd/crank/xpkg/build.go b/cmd/crank/xpkg/build.go index 95d728f26..1442b9b42 100644 --- a/cmd/crank/xpkg/build.go +++ b/cmd/crank/xpkg/build.go @@ -94,12 +94,12 @@ func (c *buildCmd) AfterApply() error { // buildCmd builds a crossplane package. type buildCmd struct { // Flags. Keep sorted alphabetically. - EmbedRuntimeImage string `placeholder:"NAME" help:"An OCI image to embed in the package as its runtime." xor:"runtime-image"` - EmbedRuntimeImageTarball string `placeholder:"PATH" type:"existingfile" help:"An OCI image tarball to embed in the package as its runtime." xor:"runtime-image"` - ExamplesRoot string `short:"e" type:"path" help:"A directory of example YAML files to include in the package." default:"./examples"` - Ignore []string `placeholder:"PATH" help:"Comma-separated file paths, specified relative to --package-root, to exclude from the package. Wildcards are supported. Directories cannot be excluded."` - PackageFile string `short:"o" type:"path" placeholder:"PATH" help:"The file to write the package to. Defaults to a generated filename in --package-root."` - PackageRoot string `short:"f" type:"existingdir" help:"The directory that contains the package's crossplane.yaml file." default:"."` + EmbedRuntimeImage string `help:"An OCI image to embed in the package as its runtime." placeholder:"NAME" xor:"runtime-image"` + EmbedRuntimeImageTarball string `help:"An OCI image tarball to embed in the package as its runtime." placeholder:"PATH" type:"existingfile" xor:"runtime-image"` + ExamplesRoot string `default:"./examples" help:"A directory of example YAML files to include in the package." short:"e" type:"path"` + Ignore []string `help:"Comma-separated file paths, specified relative to --package-root, to exclude from the package. Wildcards are supported. Directories cannot be excluded." placeholder:"PATH"` + PackageFile string `help:"The file to write the package to. Defaults to a generated filename in --package-root." placeholder:"PATH" short:"o" type:"path"` + PackageRoot string `default:"." help:"The directory that contains the package's crossplane.yaml file." short:"f" type:"existingdir"` // Internal state. These aren't part of the user-exposed CLI structure. fs afero.Fs @@ -148,7 +148,6 @@ func (c *buildCmd) GetRuntimeBaseImageOpts() ([]xpkg.BuildOpt, error) { return []xpkg.BuildOpt{xpkg.WithBase(img)}, nil } return nil, nil - } // GetOutputFileName prepares output file name. diff --git a/cmd/crank/xpkg/install.go b/cmd/crank/xpkg/install.go index b09b1d7fd..bcadf9b01 100644 --- a/cmd/crank/xpkg/install.go +++ b/cmd/crank/xpkg/install.go @@ -53,16 +53,16 @@ const ( // installCmd installs a package. type installCmd struct { // Arguments. - Kind string `arg:"" help:"The kind of package to install. One of \"provider\", \"configuration\", or \"function\"." enum:"provider,configuration,function"` + Kind string `arg:"" enum:"provider,configuration,function" help:"The kind of package to install. 
One of \"provider\", \"configuration\", or \"function\"."` Package string `arg:"" help:"The package to install."` - Name string `arg:"" optional:"" help:"The name of the new package in the Crossplane API. Derived from the package repository and tag by default."` + Name string `arg:"" help:"The name of the new package in the Crossplane API. Derived from the package repository and tag by default." optional:""` // Flags. Keep sorted alphabetically. - RuntimeConfig string `placeholder:"NAME" help:"Install the package with a runtime configuration (for example a DeploymentRuntimeConfig)."` - ManualActivation bool `short:"m" help:"Require the new package's first revision to be manually activated."` - PackagePullSecrets []string `placeholder:"NAME" help:"A comma-separated list of secrets the package manager should use to pull the package from the registry."` - RevisionHistoryLimit int64 `short:"r" placeholder:"LIMIT" help:"How many package revisions may exist before the oldest revisions are deleted."` - Wait time.Duration `short:"w" default:"0s" help:"How long to wait for the package to install before returning. The command does not wait by default. Returns an error if the timeout is exceeded."` + RuntimeConfig string `help:"Install the package with a runtime configuration (for example a DeploymentRuntimeConfig)." placeholder:"NAME"` + ManualActivation bool `help:"Require the new package's first revision to be manually activated." short:"m"` + PackagePullSecrets []string `help:"A comma-separated list of secrets the package manager should use to pull the package from the registry." placeholder:"NAME"` + RevisionHistoryLimit int64 `help:"How many package revisions may exist before the oldest revisions are deleted." placeholder:"LIMIT" short:"r"` + Wait time.Duration `default:"0s" help:"How long to wait for the package to install before returning. The command does not wait by default. Returns an error if the timeout is exceeded." short:"w"` } func (c *installCmd) Help() string { @@ -200,7 +200,6 @@ func (c *installCmd) Run(k *kong.Context, logger logging.Logger) error { //nolin if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) { return errors.Wrap(err, "Package did not become ready") } - } _, err = fmt.Fprintf(k.Stdout, "%s/%s created\n", c.Kind, pkg.GetName()) diff --git a/cmd/crank/xpkg/login.go b/cmd/crank/xpkg/login.go index b0afe6d76..614bbc446 100644 --- a/cmd/crank/xpkg/login.go +++ b/cmd/crank/xpkg/login.go @@ -49,9 +49,9 @@ const ( type loginCmd struct { // Flags. We're intentionally making an exception to the rule here and not // sorting these alphabetically. - Username string `short:"u" env:"UP_USER" xor:"identifier" help:"Username used to authenticate."` - Password string `short:"p" env:"UP_PASSWORD" help:"Password for specified username. '-' to read from stdin."` - Token string `short:"t" env:"UP_TOKEN" xor:"identifier" help:"Token used to authenticate. '-' to read from stdin."` + Username string `env:"UP_USER" help:"Username used to authenticate." short:"u" xor:"identifier"` + Password string `env:"UP_PASSWORD" help:"Password for specified username. '-' to read from stdin." short:"p"` + Token string `env:"UP_TOKEN" help:"Token used to authenticate. '-' to read from stdin." short:"t" xor:"identifier"` // Common Upbound API configuration. upbound.Flags `embed:""` @@ -212,8 +212,8 @@ func getPassword(f *os.File) (string, error) { // Print a new line because ReadPassword does not. 
_, _ = fmt.Fprintf(f, "\n") return string(password), nil - } + func getUsername(f *os.File) (string, error) { if !term.IsTerminal(int(f.Fd())) { return "", errors.New("not a terminal") diff --git a/cmd/crank/xpkg/push.go b/cmd/crank/xpkg/push.go index eca0aee0d..21870818f 100644 --- a/cmd/crank/xpkg/push.go +++ b/cmd/crank/xpkg/push.go @@ -61,7 +61,7 @@ type pushCmd struct { Package string `arg:"" help:"Where to push the package."` // Flags. Keep sorted alphabetically. - PackageFiles []string `short:"f" type:"existingfile" placeholder:"PATH" help:"A comma-separated list of xpkg files to push."` + PackageFiles []string `help:"A comma-separated list of xpkg files to push." placeholder:"PATH" short:"f" type:"existingfile"` // Common Upbound API configuration. upbound.Flags `embed:""` diff --git a/cmd/crank/xpkg/update.go b/cmd/crank/xpkg/update.go index 97f532246..2c3e0fb8f 100644 --- a/cmd/crank/xpkg/update.go +++ b/cmd/crank/xpkg/update.go @@ -42,9 +42,9 @@ import ( // updateCmd updates a package. type updateCmd struct { // Arguments. - Kind string `arg:"" help:"The kind of package to update. One of \"provider\", \"configuration\", or \"function\"." enum:"provider,configuration,function"` + Kind string `arg:"" enum:"provider,configuration,function" help:"The kind of package to update. One of \"provider\", \"configuration\", or \"function\"."` Package string `arg:"" help:"The package to update to."` - Name string `arg:"" optional:"" help:"The name of the package to update in the Crossplane API. Derived from the package repository and tag by default."` + Name string `arg:"" help:"The name of the package to update in the Crossplane API. Derived from the package repository and tag by default." optional:""` } func (c *updateCmd) Help() string { diff --git a/cmd/crossplane/core/core.go b/cmd/crossplane/core/core.go index 58a2e87e9..0781968c8 100644 --- a/cmd/crossplane/core/core.go +++ b/cmd/crossplane/core/core.go @@ -60,7 +60,7 @@ import ( "github.com/crossplane/crossplane/internal/xpkg" ) -// Command runs the core crossplane controllers +// Command runs the core crossplane controllers. type Command struct { Start startCommand `cmd:"" help:"Start Crossplane controllers."` Init initCommand `cmd:"" help:"Make cluster ready for Crossplane controllers."` @@ -81,28 +81,28 @@ func (c *Command) Run() error { } type startCommand struct { - Profile string `placeholder:"host:port" help:"Serve runtime profiling data via HTTP at /debug/pprof."` + Profile string `help:"Serve runtime profiling data via HTTP at /debug/pprof." placeholder:"host:port"` - Namespace string `short:"n" help:"Namespace used to unpack and run packages." default:"crossplane-system" env:"POD_NAMESPACE"` - ServiceAccount string `help:"Name of the Crossplane Service Account." default:"crossplane" env:"POD_SERVICE_ACCOUNT"` - CacheDir string `short:"c" help:"Directory used for caching package images." default:"/cache" env:"CACHE_DIR"` - LeaderElection bool `short:"l" help:"Use leader election for the controller manager." default:"false" env:"LEADER_ELECTION"` - Registry string `short:"r" help:"Default registry used to fetch packages when not specified in tag." default:"${default_registry}" env:"REGISTRY"` - CABundlePath string `help:"Additional CA bundle to use when fetching packages from registry." env:"CA_BUNDLE_PATH"` - UserAgent string `help:"The User-Agent header that will be set on all package requests." 
default:"${default_user_agent}" env:"USER_AGENT"` + Namespace string `default:"crossplane-system" env:"POD_NAMESPACE" help:"Namespace used to unpack and run packages." short:"n"` + ServiceAccount string `default:"crossplane" env:"POD_SERVICE_ACCOUNT" help:"Name of the Crossplane Service Account."` + CacheDir string `default:"/cache" env:"CACHE_DIR" help:"Directory used for caching package images." short:"c"` + LeaderElection bool `default:"false" env:"LEADER_ELECTION" help:"Use leader election for the controller manager." short:"l"` + Registry string `default:"${default_registry}" env:"REGISTRY" help:"Default registry used to fetch packages when not specified in tag." short:"r"` + CABundlePath string `env:"CA_BUNDLE_PATH" help:"Additional CA bundle to use when fetching packages from registry."` + UserAgent string `default:"${default_user_agent}" env:"USER_AGENT" help:"The User-Agent header that will be set on all package requests."` - PackageRuntime string `helm:"The package runtime to use for packages with a runtime (e.g. Providers and Functions)" default:"Deployment" env:"PACKAGE_RUNTIME"` + PackageRuntime string `default:"Deployment" env:"PACKAGE_RUNTIME" helm:"The package runtime to use for packages with a runtime (e.g. Providers and Functions)"` - SyncInterval time.Duration `short:"s" help:"How often all resources will be double-checked for drift from the desired state." default:"1h"` - PollInterval time.Duration `help:"How often individual resources will be checked for drift from the desired state." default:"1m"` - MaxReconcileRate int `help:"The global maximum rate per second at which resources may checked for drift from the desired state." default:"10"` + SyncInterval time.Duration `default:"1h" help:"How often all resources will be double-checked for drift from the desired state." short:"s"` + PollInterval time.Duration `default:"1m" help:"How often individual resources will be checked for drift from the desired state."` + MaxReconcileRate int `default:"10" help:"The global maximum rate per second at which resources may checked for drift from the desired state."` - WebhookEnabled bool `help:"Enable webhook configuration." default:"true" env:"WEBHOOK_ENABLED"` + WebhookEnabled bool `default:"true" env:"WEBHOOK_ENABLED" help:"Enable webhook configuration."` - TLSServerSecretName string `help:"The name of the TLS Secret that will store Crossplane's server certificate." env:"TLS_SERVER_SECRET_NAME"` - TLSServerCertsDir string `help:"The path of the folder which will store TLS server certificate of Crossplane." env:"TLS_SERVER_CERTS_DIR"` - TLSClientSecretName string `help:"The name of the TLS Secret that will be store Crossplane's client certificate." env:"TLS_CLIENT_SECRET_NAME"` - TLSClientCertsDir string `help:"The path of the folder which will store TLS client certificate of Crossplane." 
env:"TLS_CLIENT_CERTS_DIR"` + TLSServerSecretName string `env:"TLS_SERVER_SECRET_NAME" help:"The name of the TLS Secret that will store Crossplane's server certificate."` + TLSServerCertsDir string `env:"TLS_SERVER_CERTS_DIR" help:"The path of the folder which will store TLS server certificate of Crossplane."` + TLSClientSecretName string `env:"TLS_CLIENT_SECRET_NAME" help:"The name of the TLS Secret that will be store Crossplane's client certificate."` + TLSClientCertsDir string `env:"TLS_CLIENT_CERTS_DIR" help:"The path of the folder which will store TLS client certificate of Crossplane."` EnableEnvironmentConfigs bool `group:"Alpha Features:" help:"Enable support for EnvironmentConfigs."` EnableExternalSecretStores bool `group:"Alpha Features:" help:"Enable support for External Secret Stores."` @@ -110,10 +110,10 @@ type startCommand struct { EnableRealtimeCompositions bool `group:"Alpha Features:" help:"Enable support for realtime compositions, i.e. watching composed resources and reconciling compositions immediately when any of the composed resources is updated."` EnableSSAClaims bool `group:"Alpha Features:" help:"Enable support for using Kubernetes server-side apply to sync claims with composite resources (XRs)."` - EnableCompositionFunctions bool `group:"Beta Features:" default:"true" help:"Enable support for Composition Functions."` - EnableCompositionFunctionsExtraResources bool `group:"Beta Features:" default:"true" help:"Enable support for Composition Functions Extra Resources. Only respected if --enable-composition-functions is set to true."` - EnableCompositionWebhookSchemaValidation bool `group:"Beta Features:" default:"true" help:"Enable support for Composition validation using schemas."` - EnableDeploymentRuntimeConfigs bool `group:"Beta Features:" default:"true" help:"Enable support for Deployment Runtime Configs."` + EnableCompositionFunctions bool `default:"true" group:"Beta Features:" help:"Enable support for Composition Functions."` + EnableCompositionFunctionsExtraResources bool `default:"true" group:"Beta Features:" help:"Enable support for Composition Functions Extra Resources. Only respected if --enable-composition-functions is set to true."` + EnableCompositionWebhookSchemaValidation bool `default:"true" group:"Beta Features:" help:"Enable support for Composition validation using schemas."` + EnableDeploymentRuntimeConfigs bool `default:"true" group:"Beta Features:" help:"Enable support for Deployment Runtime Configs."` // These are GA features that previously had alpha or beta feature flags. // You can't turn off a GA feature. We maintain the flags to avoid breaking diff --git a/cmd/crossplane/core/init.go b/cmd/crossplane/core/init.go index fbe6ba0aa..aefb001cb 100644 --- a/cmd/crossplane/core/init.go +++ b/cmd/crossplane/core/init.go @@ -34,19 +34,19 @@ import ( // initCommand configuration for the initialization of core Crossplane controllers. type initCommand struct { - Providers []string `name:"provider" help:"Pre-install a Provider by giving its image URI. This argument can be repeated."` - Configurations []string `name:"configuration" help:"Pre-install a Configuration by giving its image URI. This argument can be repeated."` - Namespace string `short:"n" help:"Namespace used to set as default scope in default secret store config." default:"crossplane-system" env:"POD_NAMESPACE"` - ServiceAccount string `help:"Name of the Crossplane Service Account." 
default:"crossplane" env:"POD_SERVICE_ACCOUNT"` + Providers []string `help:"Pre-install a Provider by giving its image URI. This argument can be repeated." name:"provider"` + Configurations []string `help:"Pre-install a Configuration by giving its image URI. This argument can be repeated." name:"configuration"` + Namespace string `default:"crossplane-system" env:"POD_NAMESPACE" help:"Namespace used to set as default scope in default secret store config." short:"n"` + ServiceAccount string `default:"crossplane" env:"POD_SERVICE_ACCOUNT" help:"Name of the Crossplane Service Account."` - WebhookEnabled bool `help:"Enable webhook configuration." default:"true" env:"WEBHOOK_ENABLED"` - WebhookServiceName string `help:"The name of the Service object that the webhook service will be run." env:"WEBHOOK_SERVICE_NAME"` - WebhookServiceNamespace string `help:"The namespace of the Service object that the webhook service will be run." env:"WEBHOOK_SERVICE_NAMESPACE"` - WebhookServicePort int32 `help:"The port of the Service that the webhook service will be run." env:"WEBHOOK_SERVICE_PORT"` - ESSTLSServerSecretName string `help:"The name of the Secret that the initializer will fill with ESS TLS server certificate." env:"ESS_TLS_SERVER_SECRET_NAME"` - TLSCASecretName string `help:"The name of the Secret that the initializer will fill with TLS CA certificate." env:"TLS_CA_SECRET_NAME"` - TLSServerSecretName string `help:"The name of the Secret that the initializer will fill with TLS server certificates." env:"TLS_SERVER_SECRET_NAME"` - TLSClientSecretName string `help:"The name of the Secret that the initializer will fill with TLS client certificates." env:"TLS_CLIENT_SECRET_NAME"` + WebhookEnabled bool `default:"true" env:"WEBHOOK_ENABLED" help:"Enable webhook configuration."` + WebhookServiceName string `env:"WEBHOOK_SERVICE_NAME" help:"The name of the Service object that the webhook service will be run."` + WebhookServiceNamespace string `env:"WEBHOOK_SERVICE_NAMESPACE" help:"The namespace of the Service object that the webhook service will be run."` + WebhookServicePort int32 `env:"WEBHOOK_SERVICE_PORT" help:"The port of the Service that the webhook service will be run."` + ESSTLSServerSecretName string `env:"ESS_TLS_SERVER_SECRET_NAME" help:"The name of the Secret that the initializer will fill with ESS TLS server certificate."` + TLSCASecretName string `env:"TLS_CA_SECRET_NAME" help:"The name of the Secret that the initializer will fill with TLS CA certificate."` + TLSServerSecretName string `env:"TLS_SERVER_SECRET_NAME" help:"The name of the Secret that the initializer will fill with TLS server certificates."` + TLSClientSecretName string `env:"TLS_CLIENT_SECRET_NAME" help:"The name of the Secret that the initializer will fill with TLS client certificates."` } // Run starts the initialization process. diff --git a/cmd/crossplane/main.go b/cmd/crossplane/main.go index 0e0b71daf..1ac5e5500 100644 --- a/cmd/crossplane/main.go +++ b/cmd/crossplane/main.go @@ -41,15 +41,17 @@ import ( "github.com/crossplane/crossplane/internal/version" ) -type debugFlag bool -type versionFlag bool +type ( + debugFlag bool + versionFlag bool +) var cli struct { - Debug debugFlag `short:"d" help:"Print verbose logging statements."` + Debug debugFlag `help:"Print verbose logging statements." short:"d"` - Version versionFlag `short:"v" help:"Print version and quit."` + Version versionFlag `help:"Print version and quit." short:"v"` - Core core.Command `cmd:"" help:"Start core Crossplane controllers." 
default:"withargs"` + Core core.Command `cmd:"" default:"withargs" help:"Start core Crossplane controllers."` Rbac rbac.Command `cmd:"" help:"Start Crossplane RBAC Manager controllers."` } diff --git a/cmd/crossplane/rbac/rbac.go b/cmd/crossplane/rbac/rbac.go index 469d81a1c..671918435 100644 --- a/cmd/crossplane/rbac/rbac.go +++ b/cmd/crossplane/rbac/rbac.go @@ -56,7 +56,7 @@ var KongVars = kong.Vars{ "rbac_default_registry": xpkg.DefaultRegistry, } -// Command runs the crossplane RBAC controllers +// Command runs the crossplane RBAC controllers. type Command struct { Start startCommand `cmd:"" help:"Start Crossplane RBAC controllers."` Init initCommand `cmd:"" help:"Initialize RBAC Manager."` @@ -70,18 +70,18 @@ func (c *Command) Run() error { } type startCommand struct { - Profile string `placeholder:"host:port" help:"Serve runtime profiling data via HTTP at /debug/pprof."` + Profile string `help:"Serve runtime profiling data via HTTP at /debug/pprof." placeholder:"host:port"` - ProviderClusterRole string `name:"provider-clusterrole" help:"A ClusterRole enumerating the permissions provider packages may request."` - LeaderElection bool `name:"leader-election" short:"l" help:"Use leader election for the controller manager." env:"LEADER_ELECTION"` - Registry string `short:"r" help:"Default registry used to fetch packages when not specified in tag." default:"${rbac_default_registry}" env:"REGISTRY"` + ProviderClusterRole string `help:"A ClusterRole enumerating the permissions provider packages may request." name:"provider-clusterrole"` + LeaderElection bool `env:"LEADER_ELECTION" help:"Use leader election for the controller manager." name:"leader-election" short:"l"` + Registry string `default:"${rbac_default_registry}" env:"REGISTRY" help:"Default registry used to fetch packages when not specified in tag." short:"r"` - ManagementPolicy string `name:"manage" short:"m" hidden:""` - DeprecatedManagementPolicy string `name:"deprecated-manage" hidden:"" default:"${rbac_manage_default_var}" enum:"${rbac_manage_enum_var}"` + ManagementPolicy string `hidden:"" name:"manage" short:"m"` + DeprecatedManagementPolicy string `default:"${rbac_manage_default_var}" enum:"${rbac_manage_enum_var}" hidden:"" name:"deprecated-manage"` - SyncInterval time.Duration `short:"s" help:"How often all resources will be double-checked for drift from the desired state." default:"1h"` - PollInterval time.Duration `help:"How often individual resources will be checked for drift from the desired state." default:"1m"` - MaxReconcileRate int `help:"The global maximum rate per second at which resources may checked for drift from the desired state." default:"10"` + SyncInterval time.Duration `default:"1h" help:"How often all resources will be double-checked for drift from the desired state." short:"s"` + PollInterval time.Duration `default:"1m" help:"How often individual resources will be checked for drift from the desired state."` + MaxReconcileRate int `default:"10" help:"The global maximum rate per second at which resources may checked for drift from the desired state."` } // Run the RBAC manager. diff --git a/internal/controller/apiextensions/claim/connection.go b/internal/controller/apiextensions/claim/connection.go index 0ec175c94..c40bf8ece 100644 --- a/internal/controller/apiextensions/claim/connection.go +++ b/internal/controller/apiextensions/claim/connection.go @@ -43,7 +43,7 @@ const ( // NopConnectionUnpublisher is a ConnectionUnpublisher that does nothing. 
type NopConnectionUnpublisher struct{} -// NewNopConnectionUnpublisher returns a new NopConnectionUnpublisher +// NewNopConnectionUnpublisher returns a new NopConnectionUnpublisher. func NewNopConnectionUnpublisher() *NopConnectionUnpublisher { return &NopConnectionUnpublisher{} } diff --git a/internal/controller/apiextensions/claim/connection_test.go b/internal/controller/apiextensions/claim/connection_test.go index 72b46c578..e4fc314f6 100644 --- a/internal/controller/apiextensions/claim/connection_test.go +++ b/internal/controller/apiextensions/claim/connection_test.go @@ -34,9 +34,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" ) -var ( - _ ConnectionPropagator = &APIConnectionPropagator{} -) +var _ ConnectionPropagator = &APIConnectionPropagator{} func TestPropagateConnection(t *testing.T) { errBoom := errors.New("boom") diff --git a/internal/controller/apiextensions/claim/object.go b/internal/controller/apiextensions/claim/object.go index 3051f8e68..0c4596883 100644 --- a/internal/controller/apiextensions/claim/object.go +++ b/internal/controller/apiextensions/claim/object.go @@ -61,21 +61,21 @@ type mergeConfig struct { srcfilter []string } -// withMergeOptions allows custom mergo.Config options +// withMergeOptions allows custom mergo.Config options. func withMergeOptions(opts ...func(*mergo.Config)) func(*mergeConfig) { return func(config *mergeConfig) { config.mergeOptions = opts } } -// withSrcFilter filters supplied keys from src map before merging +// withSrcFilter filters supplied keys from src map before merging. func withSrcFilter(keys ...string) func(*mergeConfig) { return func(config *mergeConfig) { config.srcfilter = keys } } -// merge a src map into dst map +// merge a src map into dst map. func merge(dst, src any, opts ...func(*mergeConfig)) error { if dst == nil || src == nil { // Nothing available to merge if dst or src are nil. diff --git a/internal/controller/apiextensions/claim/reconciler.go b/internal/controller/apiextensions/claim/reconciler.go index a6c0c485f..6f3196bb3 100644 --- a/internal/controller/apiextensions/claim/reconciler.go +++ b/internal/controller/apiextensions/claim/reconciler.go @@ -162,10 +162,10 @@ type DefaultsSelector interface { SelectDefaults(ctx context.Context, cm resource.CompositeClaim) error } -// A DefaultsSelectorFn is responsible for copying default values from the CompositeResourceDefinition +// A DefaultsSelectorFn is responsible for copying default values from the CompositeResourceDefinition. type DefaultsSelectorFn func(ctx context.Context, cm resource.CompositeClaim) error -// SelectDefaults copies default values from the XRD if necessary +// SelectDefaults copies default values from the XRD if necessary. func (fn DefaultsSelectorFn) SelectDefaults(ctx context.Context, cm resource.CompositeClaim) error { return fn(ctx, cm) } diff --git a/internal/controller/apiextensions/claim/syncer_ssa.go b/internal/controller/apiextensions/claim/syncer_ssa.go index 6b0612997..14554b7d8 100644 --- a/internal/controller/apiextensions/claim/syncer_ssa.go +++ b/internal/controller/apiextensions/claim/syncer_ssa.go @@ -36,7 +36,7 @@ import ( "github.com/crossplane/crossplane/internal/xcrd" ) -// Error strings +// Error strings. 
const ( errCreatePatch = "cannot create patch" errPatchFieldManagers = "cannot patch field managers" diff --git a/internal/controller/apiextensions/composite/api_test.go b/internal/controller/apiextensions/composite/api_test.go index f2ff858ea..d8b3fbf6e 100644 --- a/internal/controller/apiextensions/composite/api_test.go +++ b/internal/controller/apiextensions/composite/api_test.go @@ -705,7 +705,8 @@ func TestSelectorResolver(t *testing.T) { } t.Errorf("wrong query") return nil - }}, + }, + }, cp: &fake.Composite{ CompositionSelector: fake.CompositionSelector{Sel: sel}, }, diff --git a/internal/controller/apiextensions/composite/composition_functions_test.go b/internal/controller/apiextensions/composite/composition_functions_test.go index 8a9ac5274..6b2595ab8 100644 --- a/internal/controller/apiextensions/composite/composition_functions_test.go +++ b/internal/controller/apiextensions/composite/composition_functions_test.go @@ -415,7 +415,8 @@ func TestFunctionCompose(t *testing.T) { "status": map[string]any{ "widgets": 42, }, - })}, + }), + }, } return &v1beta1.RunFunctionResponse{Desired: d}, nil }), @@ -846,7 +847,6 @@ func TestFunctionCompose(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - c := NewFunctionComposer(tc.params.kube, tc.params.r, tc.params.o...) res, err := c.Compose(tc.args.ctx, tc.args.xr, tc.args.req) @@ -1067,17 +1067,18 @@ func TestGetComposedResources(t *testing.T) { }, }, want: want{ - ors: ComposedResourceStates{"cool-resource": ComposedResourceState{ - ConnectionDetails: details, - Resource: func() resource.Composed { - cd := composed.New() - cd.SetAPIVersion("example.org/v1") - cd.SetKind("Composed") - cd.SetName("cool-resource-42") - SetCompositionResourceName(cd, "cool-resource") - return cd - }(), - }, + ors: ComposedResourceStates{ + "cool-resource": ComposedResourceState{ + ConnectionDetails: details, + Resource: func() resource.Composed { + cd := composed.New() + cd.SetAPIVersion("example.org/v1") + cd.SetKind("Composed") + cd.SetName("cool-resource-42") + SetCompositionResourceName(cd, "cool-resource") + return cd + }(), + }, }, }, }, @@ -1085,7 +1086,6 @@ func TestGetComposedResources(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - g := NewExistingComposedResourceObserver(tc.params.c, tc.params.f) ors, err := g.ObserveComposedResources(tc.args.ctx, tc.args.xr) @@ -1323,7 +1323,6 @@ func TestGarbageCollectComposedResources(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - d := NewDeletingComposedResourceGarbageCollector(tc.params.client) err := d.GarbageCollectComposedResources(tc.args.ctx, tc.args.owner, tc.args.observed, tc.args.desired) @@ -1393,13 +1392,11 @@ func TestUpdateResourceRefs(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - UpdateResourceRefs(tc.args.xr, tc.args.drs) if diff := cmp.Diff(tc.want.xr, tc.args.xr); diff != "" { t.Errorf("\n%s\nUpdateResourceRefs(...): -want, +got:\n%s", tc.reason, diff) } - }) } } diff --git a/internal/controller/apiextensions/composite/composition_patches.go b/internal/controller/apiextensions/composite/composition_patches.go index 568d2b967..85d4e8512 100644 --- a/internal/controller/apiextensions/composite/composition_patches.go +++ b/internal/controller/apiextensions/composite/composition_patches.go @@ -95,7 +95,7 @@ func ApplyToObjects(p v1.Patch, cp, cd runtime.Object, only ...v1.PatchType) err return errors.Errorf(errFmtInvalidPatchType, p.Type) } -// filterPatch returns true 
if patch should be filtered (not applied) +// filterPatch returns true if patch should be filtered (not applied). func filterPatch(p v1.Patch, only ...v1.PatchType) bool { // filter does not apply if not set if len(only) == 0 { diff --git a/internal/controller/apiextensions/composite/composition_patches_test.go b/internal/controller/apiextensions/composite/composition_patches_test.go index ef470ad6b..b4cf87f78 100644 --- a/internal/controller/apiextensions/composite/composition_patches_test.go +++ b/internal/controller/apiextensions/composite/composition_patches_test.go @@ -460,7 +460,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: nil, }, @@ -502,7 +503,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: nil, }, @@ -545,7 +547,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: nil, }, @@ -635,7 +638,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: nil, }, @@ -682,7 +686,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: errors.Errorf(errFmtRequiredField, "Combine", v1.PatchTypeCombineFromComposite), }, @@ -729,7 +734,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: errors.Errorf(errFmtRequiredField, "Combine", v1.PatchTypeCombineFromEnvironment), }, @@ -783,7 +789,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: errors.Errorf(errFmtCombineConfigMissing, v1.CombineStrategyString), }, @@ -835,7 +842,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: errors.New(errCombineRequiresVariables), }, @@ -894,7 +902,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: nil, }, @@ -950,7 +959,8 @@ func TestPatchApply(t *testing.T) { Labels: map[string]string{ "Test": "blah", "destination": "foo-bar", - }}, + }, + }, }, err: nil, }, @@ -1006,7 +1016,8 @@ func TestPatchApply(t *testing.T) { Labels: map[string]string{ "source1": "foo", "source2": "bar", - }}, + }, + }, }, err: nil, }, @@ -1062,7 +1073,8 @@ func TestPatchApply(t *testing.T) { Labels: map[string]string{ "source1": "foo", "source2": "bar", - }}, + }, + }, }, err: nil, }, diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index cc8b1eedb..99aba85da 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -41,7 +41,7 @@ import ( "github.com/crossplane/crossplane/internal/names" ) -// Error strings +// Error strings. const ( errGetComposed = "cannot get composed resource" errGCComposed = "cannot garbage collect composed resource" @@ -354,7 +354,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re // toXRPatchesFromTAs selects patches defined in composed templates, // whose type is one of the XR-targeting patches -// (e.g. v1.PatchTypeToCompositeFieldPath or v1.PatchTypeCombineToComposite) +// (e.g. v1.PatchTypeToCompositeFieldPath or v1.PatchTypeCombineToComposite). 
func toXRPatchesFromTAs(tas []TemplateAssociation) []v1.Patch { filtered := make([]v1.Patch, 0, len(tas)) for _, ta := range tas { @@ -364,7 +364,7 @@ func toXRPatchesFromTAs(tas []TemplateAssociation) []v1.Patch { return filtered } -// filterPatches selects patches whose type belong to the list onlyTypes +// filterPatches selects patches whose type belong to the list onlyTypes. func filterPatches(pas []v1.Patch, onlyTypes ...v1.PatchType) []v1.Patch { filtered := make([]v1.Patch, 0, len(pas)) include := make(map[v1.PatchType]bool) diff --git a/internal/controller/apiextensions/composite/composition_pt_test.go b/internal/controller/apiextensions/composite/composition_pt_test.go index 464b4351f..fe0d58476 100644 --- a/internal/controller/apiextensions/composite/composition_pt_test.go +++ b/internal/controller/apiextensions/composite/composition_pt_test.go @@ -474,7 +474,6 @@ func TestPTCompose(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - c := NewPTComposer(tc.params.kube, tc.params.o...) res, err := c.Compose(tc.args.ctx, tc.args.xr, tc.args.req) @@ -485,7 +484,6 @@ func TestPTCompose(t *testing.T) { if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("\n%s\nCompose(...): -want error, +got error:\n%s", tc.reason, diff) } - }) } } diff --git a/internal/controller/apiextensions/composite/composition_render.go b/internal/controller/apiextensions/composite/composition_render.go index f43c5c80a..2cb2cb2ff 100644 --- a/internal/controller/apiextensions/composite/composition_render.go +++ b/internal/controller/apiextensions/composite/composition_render.go @@ -26,7 +26,7 @@ import ( "github.com/crossplane/crossplane/internal/xcrd" ) -// Error strings +// Error strings. const ( errUnmarshalJSON = "cannot unmarshal JSON data" errMarshalProtoStruct = "cannot marshal protobuf Struct to JSON" diff --git a/internal/controller/apiextensions/composite/composition_transforms.go b/internal/controller/apiextensions/composite/composition_transforms.go index 82b8e1291..f49317cf3 100644 --- a/internal/controller/apiextensions/composite/composition_transforms.go +++ b/internal/controller/apiextensions/composite/composition_transforms.go @@ -456,7 +456,6 @@ func GetConversionFunc(t *v1.ConvertTransform, from v1.TransformIOType) (func(an // may return an error. 
var conversions = map[conversionPair]func(any) (any, error){ {from: v1.TransformIOTypeString, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { - return strconv.ParseInt(i.(string), 10, 64) }, {from: v1.TransformIOTypeString, to: v1.TransformIOTypeBool, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { diff --git a/internal/controller/apiextensions/composite/composition_transforms_test.go b/internal/controller/apiextensions/composite/composition_transforms_test.go index 609a46eb0..228e3311d 100644 --- a/internal/controller/apiextensions/composite/composition_transforms_test.go +++ b/internal/controller/apiextensions/composite/composition_transforms_test.go @@ -646,7 +646,6 @@ func TestMathResolve(t *testing.T) { } func TestStringResolve(t *testing.T) { - type args struct { stype v1.StringTransformType fmts *string @@ -1057,8 +1056,8 @@ func TestStringResolve(t *testing.T) { } for name, tc := range cases { t.Run(name, func(t *testing.T) { - - tr := v1.StringTransform{Type: tc.stype, + tr := v1.StringTransform{ + Type: tc.stype, Format: tc.fmts, Convert: tc.convert, Trim: tc.trim, diff --git a/internal/controller/apiextensions/composite/connection.go b/internal/controller/apiextensions/composite/connection.go index 857e8cda9..e66484213 100644 --- a/internal/controller/apiextensions/composite/connection.go +++ b/internal/controller/apiextensions/composite/connection.go @@ -107,7 +107,7 @@ type SecretStoreConnectionPublisher struct { filter []string } -// NewSecretStoreConnectionPublisher returns a SecretStoreConnectionPublisher +// NewSecretStoreConnectionPublisher returns a SecretStoreConnectionPublisher. func NewSecretStoreConnectionPublisher(p managed.ConnectionPublisher, filter []string) *SecretStoreConnectionPublisher { return &SecretStoreConnectionPublisher{ publisher: p, diff --git a/internal/controller/apiextensions/composite/connection_test.go b/internal/controller/apiextensions/composite/connection_test.go index 8aa540ec5..0acc3415b 100644 --- a/internal/controller/apiextensions/composite/connection_test.go +++ b/internal/controller/apiextensions/composite/connection_test.go @@ -456,7 +456,6 @@ func TestExtractConfigsFromTemplate(t *testing.T) { if diff := cmp.Diff(tc.want.cfgs, cfgs); diff != "" { t.Errorf("\n%s\nExtractConfigsFromTemplate(...): -want, +got:\n%s", tc.reason, diff) } - }) } } diff --git a/internal/controller/apiextensions/composite/environment_fetcher.go b/internal/controller/apiextensions/composite/environment_fetcher.go index 224404ec2..8a3d4c255 100644 --- a/internal/controller/apiextensions/composite/environment_fetcher.go +++ b/internal/controller/apiextensions/composite/environment_fetcher.go @@ -53,7 +53,7 @@ func (f *NilEnvironmentFetcher) Fetch(_ context.Context, _ EnvironmentFetcherReq return nil, nil } -// NewAPIEnvironmentFetcher creates a new APIEnvironmentFetcher +// NewAPIEnvironmentFetcher creates a new APIEnvironmentFetcher. 
func NewAPIEnvironmentFetcher(kube client.Client) *APIEnvironmentFetcher { return &APIEnvironmentFetcher{ kube: kube, diff --git a/internal/controller/apiextensions/composite/environment_selector.go b/internal/controller/apiextensions/composite/environment_selector.go index c9a6a1f80..3d035f793 100644 --- a/internal/controller/apiextensions/composite/environment_selector.go +++ b/internal/controller/apiextensions/composite/environment_selector.go @@ -60,7 +60,7 @@ func (s *NoopEnvironmentSelector) SelectEnvironment(_ context.Context, _ resourc return nil } -// NewAPIEnvironmentSelector creates a new APIEnvironmentSelector +// NewAPIEnvironmentSelector creates a new APIEnvironmentSelector. func NewAPIEnvironmentSelector(kube client.Client) *APIEnvironmentSelector { return &APIEnvironmentSelector{ kube: kube, diff --git a/internal/controller/apiextensions/composite/environment_selector_test.go b/internal/controller/apiextensions/composite/environment_selector_test.go index e4a32f17c..ee869bd19 100644 --- a/internal/controller/apiextensions/composite/environment_selector_test.go +++ b/internal/controller/apiextensions/composite/environment_selector_test.go @@ -693,13 +693,11 @@ func TestSelect(t *testing.T) { APIVersion: v1alpha1.SchemeGroupVersion.String(), }, { - Name: "test-2", Kind: v1alpha1.EnvironmentConfigKind, APIVersion: v1alpha1.SchemeGroupVersion.String(), }, { - Name: "test-3", Kind: v1alpha1.EnvironmentConfigKind, APIVersion: v1alpha1.SchemeGroupVersion.String(), @@ -984,13 +982,11 @@ func TestSelect(t *testing.T) { APIVersion: v1alpha1.SchemeGroupVersion.String(), }, { - Name: "test-2", Kind: v1alpha1.EnvironmentConfigKind, APIVersion: v1alpha1.SchemeGroupVersion.String(), }, { - Name: "test-3", Kind: v1alpha1.EnvironmentConfigKind, APIVersion: v1alpha1.SchemeGroupVersion.String(), diff --git a/internal/controller/apiextensions/composite/fuzz_test.go b/internal/controller/apiextensions/composite/fuzz_test.go index 0844d869d..b90c689ce 100644 --- a/internal/controller/apiextensions/composite/fuzz_test.go +++ b/internal/controller/apiextensions/composite/fuzz_test.go @@ -32,9 +32,7 @@ import ( pkgmetav1alpha1 "github.com/crossplane/crossplane/apis/pkg/meta/v1alpha1" ) -var ( - fuzzScheme = runtime.NewScheme() -) +var fuzzScheme = runtime.NewScheme() func init() { if err := pkgmetav1alpha1.SchemeBuilder.AddToScheme(fuzzScheme); err != nil { @@ -48,7 +46,7 @@ func init() { } } -// Adds a type to the patch +// Adds a type to the patch. func addType(p *v1.Patch, i int) { chooseType := i % 5 switch chooseType { @@ -88,7 +86,7 @@ func FuzzPatchApply(f *testing.F) { }) } -// Adds a type to the transform +// Adds a type to the transform. func addTransformType(t *v1.Transform, i int) error { chooseType := i % 4 switch chooseType { diff --git a/internal/controller/apiextensions/composite/ready.go b/internal/controller/apiextensions/composite/ready.go index c5744c720..50b3fcdd8 100644 --- a/internal/controller/apiextensions/composite/ready.go +++ b/internal/controller/apiextensions/composite/ready.go @@ -30,7 +30,7 @@ import ( v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" ) -// Error strings +// Error strings. const ( errInvalidCheck = "invalid" errPaveObject = "cannot lookup field paths in supplied object" @@ -60,7 +60,7 @@ const ( ) // ReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. type ReadinessCheck struct { // Type indicates the type of probe you'd like to use. 
Type ReadinessCheckType @@ -79,7 +79,7 @@ type ReadinessCheck struct { } // MatchConditionReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. type MatchConditionReadinessCheck struct { // Type indicates the type of condition you'd like to use. Type xpv1.ConditionType diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index d33f7166e..05dad5837 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -54,7 +54,7 @@ const ( finalizer = "composite.apiextensions.crossplane.io" ) -// Error strings +// Error strings. const ( errGet = "cannot get composite resource" errUpdate = "cannot update composite resource" diff --git a/internal/controller/apiextensions/composition/reconciler.go b/internal/controller/apiextensions/composition/reconciler.go index 6adef545b..9999eb27b 100644 --- a/internal/controller/apiextensions/composition/reconciler.go +++ b/internal/controller/apiextensions/composition/reconciler.go @@ -46,7 +46,7 @@ const ( timeout = 2 * time.Minute ) -// Error strings +// Error strings. const ( errGet = "cannot get Composition" errListRevs = "cannot list CompositionRevisions" diff --git a/internal/controller/apiextensions/offered/reconciler_test.go b/internal/controller/apiextensions/offered/reconciler_test.go index c47cfb63f..59166a641 100644 --- a/internal/controller/apiextensions/offered/reconciler_test.go +++ b/internal/controller/apiextensions/offered/reconciler_test.go @@ -596,7 +596,8 @@ func TestReconcile(t *testing.T) { }}), WithControllerEngine(&MockEngine{ MockErr: func(_ string) error { return errBoom }, // This error should only be logged. - MockStart: func(_ string, _ kcontroller.Options, _ ...controller.Watch) error { return nil }}, + MockStart: func(_ string, _ kcontroller.Options, _ ...controller.Watch) error { return nil }, + }, ), }, }, diff --git a/internal/controller/apiextensions/offered/watch_test.go b/internal/controller/apiextensions/offered/watch_test.go index c445b002d..2ee2fe2a8 100644 --- a/internal/controller/apiextensions/offered/watch_test.go +++ b/internal/controller/apiextensions/offered/watch_test.go @@ -32,9 +32,7 @@ import ( v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" ) -var ( - _ handler.EventHandler = &EnqueueRequestForClaim{} -) +var _ handler.EventHandler = &EnqueueRequestForClaim{} func TestOffersClaim(t *testing.T) { cases := map[string]struct { diff --git a/internal/controller/pkg/manager/reconciler_test.go b/internal/controller/pkg/manager/reconciler_test.go index b9da46017..2313bb1d7 100644 --- a/internal/controller/pkg/manager/reconciler_test.go +++ b/internal/controller/pkg/manager/reconciler_test.go @@ -53,6 +53,7 @@ func NewMockRevisionFn(hash string, err error) func() (string, error) { return hash, err } } + func (m *MockRevisioner) Revision(context.Context, v1.Package) (string, error) { return m.MockRevision() } diff --git a/internal/controller/pkg/revision/reconciler.go b/internal/controller/pkg/revision/reconciler.go index 47076903b..cc08fb3e9 100644 --- a/internal/controller/pkg/revision/reconciler.go +++ b/internal/controller/pkg/revision/reconciler.go @@ -59,7 +59,7 @@ import ( const ( reconcileTimeout = 3 * time.Minute - // the max size of a package parsed by the parser + // the max size of a package parsed by the parser. 
maxPackageSize = 200 << 20 // 100 MB ) @@ -444,7 +444,6 @@ func SetupFunctionRevision(mgr ctrl.Manager, o controller.Options) error { // NewReconciler creates a new package revision reconciler. func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - r := &Reconciler{ client: mgr.GetClient(), cache: xpkg.NewNopCache(), diff --git a/internal/controller/pkg/revision/runtime_override_options.go b/internal/controller/pkg/revision/runtime_override_options.go index 1ec9b6764..f9aba51ba 100644 --- a/internal/controller/pkg/revision/runtime_override_options.go +++ b/internal/controller/pkg/revision/runtime_override_options.go @@ -333,8 +333,7 @@ func DeploymentForControllerConfig(cc *v1alpha1.ControllerConfig) DeploymentOver d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, cc.Spec.Volumes...) } if len(cc.Spec.VolumeMounts) > 0 { - d.Spec.Template.Spec.Containers[0].VolumeMounts = - append(d.Spec.Template.Spec.Containers[0].VolumeMounts, cc.Spec.VolumeMounts...) + d.Spec.Template.Spec.Containers[0].VolumeMounts = append(d.Spec.Template.Spec.Containers[0].VolumeMounts, cc.Spec.VolumeMounts...) } } } @@ -415,12 +414,10 @@ func mountTLSSecret(secret, volName, mountPath, envName string, d *appsv1.Deploy ReadOnly: true, MountPath: mountPath, } - d.Spec.Template.Spec.Containers[0].VolumeMounts = - append(d.Spec.Template.Spec.Containers[0].VolumeMounts, vm) + d.Spec.Template.Spec.Containers[0].VolumeMounts = append(d.Spec.Template.Spec.Containers[0].VolumeMounts, vm) envs := []corev1.EnvVar{ {Name: envName, Value: mountPath}, } - d.Spec.Template.Spec.Containers[0].Env = - append(d.Spec.Template.Spec.Containers[0].Env, envs...) + d.Spec.Template.Spec.Containers[0].Env = append(d.Spec.Template.Spec.Containers[0].Env, envs...) 
} diff --git a/internal/controller/pkg/revision/runtime_override_options_test.go b/internal/controller/pkg/revision/runtime_override_options_test.go index 5838bafee..bfe4300b4 100644 --- a/internal/controller/pkg/revision/runtime_override_options_test.go +++ b/internal/controller/pkg/revision/runtime_override_options_test.go @@ -169,7 +169,8 @@ func TestDeploymentWithRuntimeContainer(t *testing.T) { }, { Name: "another-one", - }, { + }, + { Name: runtimeContainerName, }, }, diff --git a/internal/controller/pkg/revision/runtime_provider_test.go b/internal/controller/pkg/revision/runtime_provider_test.go index 12203607c..467c99f36 100644 --- a/internal/controller/pkg/revision/runtime_provider_test.go +++ b/internal/controller/pkg/revision/runtime_provider_test.go @@ -46,9 +46,7 @@ const ( xpManagedSA = "xp-managed-sa" ) -var ( - errBoom = errors.New("boom") -) +var errBoom = errors.New("boom") func TestProviderPreHook(t *testing.T) { type args struct { diff --git a/internal/controller/pkg/revision/watch_test.go b/internal/controller/pkg/revision/watch_test.go index 8eaece676..f64e2a582 100644 --- a/internal/controller/pkg/revision/watch_test.go +++ b/internal/controller/pkg/revision/watch_test.go @@ -35,9 +35,7 @@ import ( "github.com/crossplane/crossplane/apis/pkg/v1alpha1" ) -var ( - _ handler.EventHandler = &EnqueueRequestForReferencingProviderRevisions{} -) +var _ handler.EventHandler = &EnqueueRequestForReferencingProviderRevisions{} type addFn func(item any) diff --git a/internal/controller/rbac/definition/reconciler.go b/internal/controller/rbac/definition/reconciler.go index 6bac28a68..fef32ff2e 100644 --- a/internal/controller/rbac/definition/reconciler.go +++ b/internal/controller/rbac/definition/reconciler.go @@ -152,7 +152,6 @@ type Reconciler struct { // Reconcile a CompositeResourceDefinition by creating a series of opinionated // ClusterRoles that may be bound to allow access to the resources it defines. func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/rbac/namespace/reconciler.go b/internal/controller/rbac/namespace/reconciler.go index 9d2084eaf..fdeecb332 100644 --- a/internal/controller/rbac/namespace/reconciler.go +++ b/internal/controller/rbac/namespace/reconciler.go @@ -154,7 +154,6 @@ type Reconciler struct { // Reconcile a Namespace by creating a series of opinionated Roles that may be // bound to allow access to resources within that namespace. 
func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/rbac/namespace/roles_test.go b/internal/controller/rbac/namespace/roles_test.go index 9e2b484b8..991d15159 100644 --- a/internal/controller/rbac/namespace/roles_test.go +++ b/internal/controller/rbac/namespace/roles_test.go @@ -115,7 +115,6 @@ func TestCRSelector(t *testing.T) { } }) } - } func TestRenderClusterRoles(t *testing.T) { diff --git a/internal/controller/rbac/namespace/watch.go b/internal/controller/rbac/namespace/watch.go index 1cc802747..b9b42cd65 100644 --- a/internal/controller/rbac/namespace/watch.go +++ b/internal/controller/rbac/namespace/watch.go @@ -88,7 +88,6 @@ func (e *EnqueueRequestForNamespaces) add(ctx context.Context, obj runtime.Objec for _, ns := range l.Items { queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.GetName()}}) } - } func aggregates(obj metav1.Object) bool { diff --git a/internal/controller/rbac/namespace/watch_test.go b/internal/controller/rbac/namespace/watch_test.go index 2872db74a..9aea56cea 100644 --- a/internal/controller/rbac/namespace/watch_test.go +++ b/internal/controller/rbac/namespace/watch_test.go @@ -34,9 +34,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" ) -var ( - _ handler.EventHandler = &EnqueueRequestForNamespaces{} -) +var _ handler.EventHandler = &EnqueueRequestForNamespaces{} type addFn func(item any) diff --git a/internal/controller/rbac/provider/binding/reconciler.go b/internal/controller/rbac/provider/binding/reconciler.go index 0d6333413..290733b9c 100644 --- a/internal/controller/rbac/provider/binding/reconciler.go +++ b/internal/controller/rbac/provider/binding/reconciler.go @@ -135,7 +135,6 @@ type Reconciler struct { // Reconcile a ProviderRevision by creating a ClusterRoleBinding that binds a // provider's service account to its system ClusterRole. func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcile methods are often very complex. Be wary. - log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/rbac/provider/roles/reconciler.go b/internal/controller/rbac/provider/roles/reconciler.go index b2909b5f2..4d2ce798e 100644 --- a/internal/controller/rbac/provider/roles/reconciler.go +++ b/internal/controller/rbac/provider/roles/reconciler.go @@ -115,7 +115,8 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { wrh := &EnqueueRequestForAllRevisionsWithRequests{ client: mgr.GetClient(), - clusterRoleName: o.AllowClusterRole} + clusterRoleName: o.AllowClusterRole, + } sfh := &EnqueueRequestForAllRevisionsInFamily{ client: mgr.GetClient(), @@ -373,7 +374,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco func DefinedResources(refs []xpv1.TypedReference) []Resource { out := make([]Resource, 0, len(refs)) for _, ref := range refs { - // This would only return an error if the APIVersion contained more than // one "/". This should be impossible, but if it somehow happens we'll // just skip this resource since it can't be a CRD. 
diff --git a/internal/controller/rbac/provider/roles/watch_test.go b/internal/controller/rbac/provider/roles/watch_test.go index fd00154ea..d1abf1b89 100644 --- a/internal/controller/rbac/provider/roles/watch_test.go +++ b/internal/controller/rbac/provider/roles/watch_test.go @@ -35,9 +35,7 @@ import ( v1 "github.com/crossplane/crossplane/apis/pkg/v1" ) -var ( - _ handler.EventHandler = &EnqueueRequestForAllRevisionsWithRequests{} -) +var _ handler.EventHandler = &EnqueueRequestForAllRevisionsWithRequests{} type addFn func(item any) diff --git a/internal/dag/dag_test.go b/internal/dag/dag_test.go index e4f16816e..787226166 100644 --- a/internal/dag/dag_test.go +++ b/internal/dag/dag_test.go @@ -64,8 +64,10 @@ func toNodes(n []simpleNode) []Node { return nodes } -var _ DAG = &MapDag{} -var _ NewDAGFn = NewMapDag +var ( + _ DAG = &MapDag{} + _ NewDAGFn = NewMapDag +) func sortedFnNop([]simpleNode, []string) error { return nil diff --git a/internal/features/features.go b/internal/features/features.go index 19866bd12..bbb0dd342 100644 --- a/internal/features/features.go +++ b/internal/features/features.go @@ -47,7 +47,7 @@ const ( EnableAlphaClaimSSA feature.Flag = "EnableAlphaClaimSSA" ) -// Beta Feature Flags +// Beta Feature Flags. const ( // EnableBetaCompositionFunctions enables alpha support for composition // functions. See the below design for more details. diff --git a/internal/initializer/cert_generator.go b/internal/initializer/cert_generator.go index 380b75be8..e187a8f2e 100644 --- a/internal/initializer/cert_generator.go +++ b/internal/initializer/cert_generator.go @@ -31,7 +31,7 @@ const ( errGenerateCertificate = "cannot generate tls certificate" ) -// CertificateSigner is the parent's certificate and key that will be used to sign the certificate +// CertificateSigner is the parent's certificate and key that will be used to sign the certificate. type CertificateSigner struct { certificate *x509.Certificate key *rsa.PrivateKey @@ -43,15 +43,13 @@ type CertificateGenerator interface { Generate(*x509.Certificate, *CertificateSigner) (key []byte, crt []byte, err error) } -var ( - pkixName = pkix.Name{ - CommonName: "Crossplane", - Organization: []string{"Crossplane"}, - Country: []string{"Earth"}, - Province: []string{"Earth"}, - Locality: []string{"Earth"}, - } -) +var pkixName = pkix.Name{ + CommonName: "Crossplane", + Organization: []string{"Crossplane"}, + Country: []string{"Earth"}, + Province: []string{"Earth"}, + Locality: []string{"Earth"}, +} // NewCertGenerator returns a new CertGenerator. func NewCertGenerator() *CertGenerator { diff --git a/internal/initializer/crds_migrator.go b/internal/initializer/crds_migrator.go index 0a7987dc8..858f52733 100644 --- a/internal/initializer/crds_migrator.go +++ b/internal/initializer/crds_migrator.go @@ -67,7 +67,7 @@ func (c *CoreCRDsMigrator) Run(ctx context.Context, kube client.Client) error { break } } - var resources = unstructured.UnstructuredList{} + resources := unstructured.UnstructuredList{} resources.SetGroupVersionKind(schema.GroupVersionKind{ Group: crd.Spec.Group, Version: storageVersion, diff --git a/internal/initializer/tls.go b/internal/initializer/tls.go index bd3a36e85..04330c01f 100644 --- a/internal/initializer/tls.go +++ b/internal/initializer/tls.go @@ -50,16 +50,16 @@ const ( const ( // RootCACertSecretName is the name of the secret that will store CA certificates and rest of the - // certificates created per entities will be signed by this CA + // certificates created per entities will be signed by this CA. 
RootCACertSecretName = "crossplane-root-ca" - // SecretKeyCACert is the secret key of CA certificate + // SecretKeyCACert is the secret key of CA certificate. SecretKeyCACert = "ca.crt" ) // TLSCertificateGenerator is an initializer step that will find the given secret // and fill its tls.crt, tls.key and ca.crt fields to be used for External Secret -// Store plugins +// Store plugins. type TLSCertificateGenerator struct { namespace string caSecretName string @@ -75,14 +75,14 @@ type TLSCertificateGenerator struct { // TLSCertificateGeneratorOption is used to configure TLSCertificateGenerator behavior. type TLSCertificateGeneratorOption func(*TLSCertificateGenerator) -// TLSCertificateGeneratorWithLogger returns an TLSCertificateGeneratorOption that configures logger +// TLSCertificateGeneratorWithLogger returns an TLSCertificateGeneratorOption that configures logger. func TLSCertificateGeneratorWithLogger(log logging.Logger) TLSCertificateGeneratorOption { return func(g *TLSCertificateGenerator) { g.log = log } } -// TLSCertificateGeneratorWithOwner returns an TLSCertificateGeneratorOption that sets owner reference +// TLSCertificateGeneratorWithOwner returns an TLSCertificateGeneratorOption that sets owner reference. func TLSCertificateGeneratorWithOwner(owner []metav1.OwnerReference) TLSCertificateGeneratorOption { return func(g *TLSCertificateGenerator) { g.owner = owner diff --git a/internal/initializer/tls_test.go b/internal/initializer/tls_test.go index 011524a50..126cbad60 100644 --- a/internal/initializer/tls_test.go +++ b/internal/initializer/tls_test.go @@ -565,7 +565,6 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { "OnlyServerCertificateSuccessfulGeneratedServerCert": { reason: "It should be successful if the server certificate is generated and put into the Secret.", args: args{ - kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { @@ -709,7 +708,6 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { "OnlyClientCertificateSuccessfulGeneratedClientCert": { reason: "It should be successful if the client certificate is generated and put into the Secret.", args: args{ - kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 4b1334944..da7c029ac 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -55,8 +55,10 @@ func userAgentValidator(userAgent string) requestValidationFn { } } -var _ http.RoundTripper = &UserAgent{} -var _ http.RoundTripper = &validatingRoundTripper{} +var ( + _ http.RoundTripper = &UserAgent{} + _ http.RoundTripper = &validatingRoundTripper{} +) func TestUserAgent(t *testing.T) { cases := map[string]struct { diff --git a/internal/usage/handler.go b/internal/usage/handler.go index 2865c4621..3bc59d576 100644 --- a/internal/usage/handler.go +++ b/internal/usage/handler.go @@ -41,7 +41,7 @@ import ( const ( // InUseIndexKey used to index CRDs by "Kind" and "group", to be used when - // indexing and retrieving needed CRDs + // indexing and retrieving needed CRDs. InUseIndexKey = "inuse.apiversion.kind.name" // Error strings. 
diff --git a/internal/xcrd/crd_test.go b/internal/xcrd/crd_test.go index 55db8df22..91323d1fc 100644 --- a/internal/xcrd/crd_test.go +++ b/internal/xcrd/crd_test.go @@ -1030,7 +1030,6 @@ func TestForCompositeResource(t *testing.T) { Type: "object", Description: "", Properties: map[string]extv1.JSONSchemaProps{ - // From CompositeResourceStatusProps() "conditions": { Description: "Conditions of the resource.", @@ -1979,8 +1978,10 @@ func TestForCompositeResourceClaim(t *testing.T) { }, "compositeDeletePolicy": { Type: "string", - Enum: []extv1.JSON{{Raw: []byte(`"Background"`)}, - {Raw: []byte(`"Foreground"`)}}, + Enum: []extv1.JSON{ + {Raw: []byte(`"Background"`)}, + {Raw: []byte(`"Foreground"`)}, + }, }, // From CompositeResourceClaimSpecProps() "compositionRef": { @@ -2263,8 +2264,10 @@ func TestForCompositeResourceClaim(t *testing.T) { "compositeDeletePolicy": { Type: "string", Default: &extv1.JSON{Raw: []byte(fmt.Sprintf("\"%s\"", defaultPolicy))}, - Enum: []extv1.JSON{{Raw: []byte(`"Background"`)}, - {Raw: []byte(`"Foreground"`)}}, + Enum: []extv1.JSON{ + {Raw: []byte(`"Background"`)}, + {Raw: []byte(`"Foreground"`)}, + }, }, // From CompositeResourceClaimSpecProps() "compositionRef": { @@ -2569,8 +2572,10 @@ func TestForCompositeResourceClaimEmptyXrd(t *testing.T) { Properties: map[string]extv1.JSONSchemaProps{ "compositeDeletePolicy": { Type: "string", - Enum: []extv1.JSON{{Raw: []byte(`"Background"`)}, - {Raw: []byte(`"Foreground"`)}}, + Enum: []extv1.JSON{ + {Raw: []byte(`"Background"`)}, + {Raw: []byte(`"Foreground"`)}, + }, }, // From CompositeResourceClaimSpecProps() "compositionRef": { diff --git a/internal/xcrd/schemas.go b/internal/xcrd/schemas.go index 01fee98f3..c54fded53 100644 --- a/internal/xcrd/schemas.go +++ b/internal/xcrd/schemas.go @@ -28,7 +28,7 @@ const ( LabelKeyClaimNamespace = "crossplane.io/claim-namespace" ) -// CompositionRevisionRef should be propagated dynamically +// CompositionRevisionRef should be propagated dynamically. var CompositionRevisionRef = "compositionRevisionRef" // PropagateSpecProps is the list of XRC spec properties to propagate @@ -413,7 +413,7 @@ func CompositeResourceClaimPrinterColumns() []extv1.CustomResourceColumnDefiniti } } -// GetPropFields returns the fields from a map of schema properties +// GetPropFields returns the fields from a map of schema properties. func GetPropFields(props map[string]extv1.JSONSchemaProps) []string { propFields := make([]string, len(props)) i := 0 diff --git a/internal/xfn/function_runner.go b/internal/xfn/function_runner.go index caef9eaaf..3a55331e0 100644 --- a/internal/xfn/function_runner.go +++ b/internal/xfn/function_runner.go @@ -35,7 +35,7 @@ import ( pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) -// Error strings +// Error strings. const ( errListFunctionRevisions = "cannot list FunctionRevisions" errNoActiveRevisions = "cannot find an active FunctionRevision (a FunctionRevision with spec.desiredState: Active)" diff --git a/internal/xpkg/fetch.go b/internal/xpkg/fetch.go index 5b10f3321..d991de98a 100644 --- a/internal/xpkg/fetch.go +++ b/internal/xpkg/fetch.go @@ -59,7 +59,7 @@ type K8sFetcher struct { userAgent string } -// FetcherOpt can be used to add optional parameters to NewK8sFetcher +// FetcherOpt can be used to add optional parameters to NewK8sFetcher. type FetcherOpt func(k *K8sFetcher) error // WithCustomCA is a FetcherOpt that can be used to add a custom CA bundle to a K8sFetcher. 
diff --git a/internal/xpkg/fuzz_test.go b/internal/xpkg/fuzz_test.go index 296e55df3..ce989c349 100644 --- a/internal/xpkg/fuzz_test.go +++ b/internal/xpkg/fuzz_test.go @@ -48,7 +48,7 @@ func FuzzFindXpkgInDir(f *testing.F) { t.Skip() } - if err = afero.WriteFile(fs, fname, fcontents, 0777); err != nil { + if err = afero.WriteFile(fs, fname, fcontents, 0o777); err != nil { t.Skip() } } @@ -56,5 +56,4 @@ func FuzzFindXpkgInDir(f *testing.F) { _, _ = FindXpkgInDir(fs, "/") _, _ = ParseNameFromMeta(fs, "/") }) - } diff --git a/internal/xpkg/upbound/config/config.go b/internal/xpkg/upbound/config/config.go index fa194acfd..502d2dc29 100644 --- a/internal/xpkg/upbound/config/config.go +++ b/internal/xpkg/upbound/config/config.go @@ -88,7 +88,7 @@ const ( TokenProfileType ProfileType = "token" ) -// A Profile is a set of credentials +// A Profile is a set of credentials. type Profile struct { // ID is either a username, email, or token. ID string `json:"id"` diff --git a/internal/xpkg/upbound/config/source.go b/internal/xpkg/upbound/config/source.go index b1d0fa28f..225cf9c5f 100644 --- a/internal/xpkg/upbound/config/source.go +++ b/internal/xpkg/upbound/config/source.go @@ -88,10 +88,10 @@ func (src *FSSource) Initialize() error { if !os.IsNotExist(err) { return err } - if err := src.fs.MkdirAll(filepath.Dir(src.path), 0755); err != nil { + if err := src.fs.MkdirAll(filepath.Dir(src.path), 0o755); err != nil { return err } - f, err := src.fs.OpenFile(src.path, os.O_CREATE, 0600) + f, err := src.fs.OpenFile(src.path, os.O_CREATE, 0o600) if err != nil { return err } @@ -123,7 +123,7 @@ func (src *FSSource) GetConfig() (*Config, error) { // UpdateConfig updates the Config in the filesystem. func (src *FSSource) UpdateConfig(c *Config) error { - f, err := src.fs.OpenFile(src.path, os.O_RDWR|os.O_TRUNC, 0600) + f, err := src.fs.OpenFile(src.path, os.O_RDWR|os.O_TRUNC, 0o600) if err != nil { return err } diff --git a/internal/xpkg/upbound/config/source_test.go b/internal/xpkg/upbound/config/source_test.go index ce66eca82..5a8e81d10 100644 --- a/internal/xpkg/upbound/config/source_test.go +++ b/internal/xpkg/upbound/config/source_test.go @@ -27,8 +27,10 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" ) -var _ Source = &FSSource{} -var _ Source = &MockSource{} +var ( + _ Source = &FSSource{} + _ Source = &MockSource{} +) // TODO(hasheddan): a mock afero.Fs could increase test coverage here with // simulated failed file opens and writes. @@ -107,7 +109,7 @@ func TestGetConfig(t *testing.T) { func(f *FSSource) { f.path = "/.up/config.json" fs := afero.NewMemMapFs() - file, _ := fs.OpenFile("/.up/config.json", os.O_CREATE, 0600) + file, _ := fs.OpenFile("/.up/config.json", os.O_CREATE, 0o600) defer file.Close() b, _ := json.Marshal(testConf) //nolint:errchkjson // marshalling should not fail _, _ = file.Write(b) diff --git a/internal/xpkg/upbound/context.go b/internal/xpkg/upbound/context.go index 81d589c88..c71a166ea 100644 --- a/internal/xpkg/upbound/context.go +++ b/internal/xpkg/upbound/context.go @@ -56,14 +56,14 @@ const ( // Flags are common flags used by commands that interact with Upbound. type Flags struct { // Keep sorted alphabetically. - Account string `short:"a" env:"UP_ACCOUNT" help:"Account used to execute command." json:"account,omitempty"` - Domain *url.URL `env:"UP_DOMAIN" default:"https://upbound.io" help:"Root Upbound domain." json:"domain,omitempty"` + Account string `env:"UP_ACCOUNT" help:"Account used to execute command." 
json:"account,omitempty" short:"a"` + Domain *url.URL `default:"https://upbound.io" env:"UP_DOMAIN" help:"Root Upbound domain." json:"domain,omitempty"` InsecureSkipTLSVerify bool `env:"UP_INSECURE_SKIP_TLS_VERIFY" help:"[INSECURE] Skip verifying TLS certificates." json:"insecureSkipTLSVerify,omitempty"` - Profile string `env:"UP_PROFILE" help:"Profile used to execute command." predictor:"profiles" json:"profile,omitempty"` + Profile string `env:"UP_PROFILE" help:"Profile used to execute command." json:"profile,omitempty" predictor:"profiles"` // Hidden flags. - APIEndpoint *url.URL `env:"OVERRIDE_API_ENDPOINT" hidden:"" name:"override-api-endpoint" help:"Overrides the default API endpoint." json:"apiEndpoint,omitempty"` - RegistryEndpoint *url.URL `env:"OVERRIDE_REGISTRY_ENDPOINT" hidden:"" name:"override-registry-endpoint" help:"Overrides the default registry endpoint." json:"registryEndpoint,omitempty"` + APIEndpoint *url.URL `env:"OVERRIDE_API_ENDPOINT" help:"Overrides the default API endpoint." hidden:"" json:"apiEndpoint,omitempty" name:"override-api-endpoint"` + RegistryEndpoint *url.URL `env:"OVERRIDE_REGISTRY_ENDPOINT" help:"Overrides the default registry endpoint." hidden:"" json:"registryEndpoint,omitempty" name:"override-registry-endpoint"` } // Context includes common data that Upbound consumers may utilize. @@ -86,7 +86,7 @@ type Context struct { fs afero.Fs } -// Option modifies a Context +// Option modifies a Context. type Option func(*Context) // AllowMissingProfile indicates that Context should still be returned even if a @@ -185,10 +185,11 @@ func (c *Context) BuildSDKConfig() (*up.Config, error) { return nil, err } if c.Profile.Session != "" { - cj.SetCookies(c.APIEndpoint, []*http.Cookie{{ - Name: CookieName, - Value: c.Profile.Session, - }, + cj.SetCookies(c.APIEndpoint, []*http.Cookie{ + { + Name: CookieName, + Value: c.Profile.Session, + }, }) } tr := &http.Transport{ diff --git a/internal/xpkg/upbound/context_test.go b/internal/xpkg/upbound/context_test.go index fd129e83c..7d4b73d07 100644 --- a/internal/xpkg/upbound/context_test.go +++ b/internal/xpkg/upbound/context_test.go @@ -81,7 +81,7 @@ func withConfig(config string) Option { return func(ctx *Context) { // establish fs and create config.json fs := afero.NewMemMapFs() - fs.MkdirAll(filepath.Dir("/.up/"), 0755) + fs.MkdirAll(filepath.Dir("/.up/"), 0o755) f, _ := fs.Create("/.up/config.json") f.WriteString(config) diff --git a/pkg/validation/apiextensions/v1/composition/patches.go b/pkg/validation/apiextensions/v1/composition/patches.go index 7cd2c4d25..4b6f58a6a 100644 --- a/pkg/validation/apiextensions/v1/composition/patches.go +++ b/pkg/validation/apiextensions/v1/composition/patches.go @@ -90,7 +90,6 @@ func (v *Validator) validateEnvironmentPatchesWithSchemas(ctx context.Context, c compositeResGVK: compositeResGVK, }), field.NewPath("spec").Child("environment", "patches").Index(i)); err != nil { errs = append(errs, err) - } } return errs @@ -427,7 +426,6 @@ func validateFieldPathSegmentField(parent *apiextensions.JSONSchemaProps, segmen return validateFieldPathSegmentField(parent.AdditionalProperties.Schema, segment) } return nil, errors.Errorf(errFmtFieldInvalid, segment.Field) - } return &prop, nil } diff --git a/pkg/validation/apiextensions/v1/composition/patches_test.go b/pkg/validation/apiextensions/v1/composition/patches_test.go index b72cfa1a3..a35de8cf2 100644 --- a/pkg/validation/apiextensions/v1/composition/patches_test.go +++ b/pkg/validation/apiextensions/v1/composition/patches_test.go @@ 
-366,7 +366,14 @@ func TestValidateFieldPath(t *testing.T) { Properties: map[string]apiextensions.JSONSchemaProps{ "forProvider": { Properties: map[string]apiextensions.JSONSchemaProps{ - "foo": {Type: "string"}}}}}}}}, + "foo": {Type: "string"}, + }, + }, + }, + }, + }, + }, + }, }, "AcceptMetadataLabelsValue": { reason: "Should validate a valid field path", @@ -391,7 +398,14 @@ func TestValidateFieldPath(t *testing.T) { Properties: map[string]apiextensions.JSONSchemaProps{ "forProvider": { Properties: map[string]apiextensions.JSONSchemaProps{ - "foo": {Type: "string"}}}}}}}}, + "foo": {Type: "string"}, + }, + }, + }, + }, + }, + }, + }, }, "AcceptFieldPathXPreserveUnknownFields": { reason: "Should not return an error for an undefined but accepted field path", @@ -404,9 +418,15 @@ func TestValidateFieldPath(t *testing.T) { Properties: map[string]apiextensions.JSONSchemaProps{ "forProvider": { Properties: map[string]apiextensions.JSONSchemaProps{ - "foo": {Type: "string"}}, + "foo": {Type: "string"}, + }, XPreserveUnknownFields: &[]bool{true}[0], - }}}}}}, + }, + }, + }, + }, + }, + }, }, "AcceptValidArray": { reason: "Should validate arrays properly", @@ -424,7 +444,18 @@ func TestValidateFieldPath(t *testing.T) { Items: &apiextensions.JSONSchemaPropsOrArray{ Schema: &apiextensions.JSONSchemaProps{ Properties: map[string]apiextensions.JSONSchemaProps{ - "bar": {Type: "string"}}}}}}}}}}}}, + "bar": {Type: "string"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, "AcceptComplexSchema": { reason: "Should validate properly with complex schema", @@ -475,7 +506,10 @@ func TestValidateFieldPath(t *testing.T) { XPreserveUnknownFields: &[]bool{true}[0], }, }, - }}}}, + }, + }, + }, + }, }, "AcceptAnnotations": { want: want{err: nil, fieldType: "string"}, @@ -896,7 +930,6 @@ func TestGetSchemaForVersion(t *testing.T) { } }) } - } func TestComposedTemplateGetBaseObject(t *testing.T) { diff --git a/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go b/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go index 9f76fb2cf..07496e5d4 100644 --- a/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go +++ b/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go @@ -237,7 +237,6 @@ func TestValidateReadinessCheck(t *testing.T) { crd.Spec.Versions[1].Schema.OpenAPIV3Schema.Properties["spec"].Properties["someField"] = extv1.JSONSchemaProps{ Type: "integer", } - }).build()), }, want: want{ diff --git a/pkg/validation/apiextensions/v1/composition/schema.go b/pkg/validation/apiextensions/v1/composition/schema.go index 061284f66..bc4d516c3 100644 --- a/pkg/validation/apiextensions/v1/composition/schema.go +++ b/pkg/validation/apiextensions/v1/composition/schema.go @@ -6,7 +6,7 @@ import ( "github.com/crossplane/crossplane/pkg/validation/internal/schema" ) -// sets all the defaults in the given schema +// sets all the defaults in the given schema. 
func defaultMetadataSchema(in *apiextensions.JSONSchemaProps) *apiextensions.JSONSchemaProps { out := in if out == nil { @@ -26,6 +26,7 @@ func defaultMetadataSchema(in *apiextensions.JSONSchemaProps) *apiextensions.JSO return out } + func defaultMetadataOnly(metadata *apiextensions.JSONSchemaProps) *apiextensions.JSONSchemaProps { setDefaultType(metadata) setDefaultProperty(metadata, "name", string(schema.KnownJSONTypeString)) diff --git a/pkg/validation/apiextensions/v1/composition/schema_test.go b/pkg/validation/apiextensions/v1/composition/schema_test.go index 5154f859d..07f8c5cc6 100644 --- a/pkg/validation/apiextensions/v1/composition/schema_test.go +++ b/pkg/validation/apiextensions/v1/composition/schema_test.go @@ -16,6 +16,7 @@ func getDefaultMetadataSchema() *apiextensions.JSONSchemaProps { func getDefaultSchema() *apiextensions.JSONSchemaProps { return defaultMetadataSchema(&apiextensions.JSONSchemaProps{}) } + func TestDefaultMetadataSchema(t *testing.T) { type args struct { in *apiextensions.JSONSchemaProps @@ -61,14 +62,18 @@ func TestDefaultMetadataSchema(t *testing.T) { }, "SpecPreserved": { reason: "Other properties should be preserved", - args: args{in: &apiextensions.JSONSchemaProps{ - Type: string(schema.KnownJSONTypeObject), - Properties: map[string]apiextensions.JSONSchemaProps{ - "spec": { - Type: string(schema.KnownJSONTypeObject), - AdditionalProperties: &apiextensions.JSONSchemaPropsOrBool{ - Allows: true, - }}}}, + args: args{ + in: &apiextensions.JSONSchemaProps{ + Type: string(schema.KnownJSONTypeObject), + Properties: map[string]apiextensions.JSONSchemaProps{ + "spec": { + Type: string(schema.KnownJSONTypeObject), + AdditionalProperties: &apiextensions.JSONSchemaPropsOrBool{ + Allows: true, + }, + }, + }, + }, }, want: want{ out: &apiextensions.JSONSchemaProps{ @@ -79,7 +84,11 @@ func TestDefaultMetadataSchema(t *testing.T) { Type: string(schema.KnownJSONTypeObject), AdditionalProperties: &apiextensions.JSONSchemaPropsOrBool{ Allows: true, - }}}}}, + }, + }, + }, + }, + }, }, "MetadataNotOverwrite": { reason: "Other properties should not be overwritten in metadata if specified in the default", @@ -91,7 +100,11 @@ func TestDefaultMetadataSchema(t *testing.T) { Properties: map[string]apiextensions.JSONSchemaProps{ "name": { Type: string(schema.KnownJSONTypeBoolean), - }}}}}}, + }, + }, + }, + }, + }}, want: want{ out: func() *apiextensions.JSONSchemaProps { s := getDefaultSchema() @@ -106,16 +119,23 @@ func TestDefaultMetadataSchema(t *testing.T) { }, "MetadataPreserved": { reason: "Other properties should be preserved in if not specified in the default", - args: args{in: &apiextensions.JSONSchemaProps{ - Type: string(schema.KnownJSONTypeObject), - Properties: map[string]apiextensions.JSONSchemaProps{ - "metadata": { - Type: string(schema.KnownJSONTypeObject), - Properties: map[string]apiextensions.JSONSchemaProps{ - "annotations": { - Type: string(schema.KnownJSONTypeObject), - Properties: map[string]apiextensions.JSONSchemaProps{ - "foo": {Type: string(schema.KnownJSONTypeString)}}}}}}}, + args: args{ + in: &apiextensions.JSONSchemaProps{ + Type: string(schema.KnownJSONTypeObject), + Properties: map[string]apiextensions.JSONSchemaProps{ + "metadata": { + Type: string(schema.KnownJSONTypeObject), + Properties: map[string]apiextensions.JSONSchemaProps{ + "annotations": { + Type: string(schema.KnownJSONTypeObject), + Properties: map[string]apiextensions.JSONSchemaProps{ + "foo": {Type: string(schema.KnownJSONTypeString)}, + }, + }, + }, + }, + }, + }, }, want: 
want{ out: func() *apiextensions.JSONSchemaProps { diff --git a/pkg/validation/apiextensions/v1/composition/validator_test.go b/pkg/validation/apiextensions/v1/composition/validator_test.go index b8a09b693..64a622e0b 100644 --- a/pkg/validation/apiextensions/v1/composition/validator_test.go +++ b/pkg/validation/apiextensions/v1/composition/validator_test.go @@ -320,7 +320,8 @@ func TestValidatorValidate(t *testing.T) { Type: v1.PatchTypeFromCompositeFieldPath, FromFieldPath: ptr.To("spec.someField"), ToFieldPath: ptr.To("spec.someOtherField"), - }}}, + }}, + }, ), withPatches(0, v1.Patch{ Type: v1.PatchTypePatchSet, PatchSetName: ptr.To("some-patch-set"), @@ -360,7 +361,8 @@ func TestValidatorValidate(t *testing.T) { }, }, ToFieldPath: ptr.To("spec.someOtherField"), - }}}, + }}, + }, ), withPatches(0, v1.Patch{ Type: v1.PatchTypePatchSet, PatchSetName: ptr.To("some-patch-set"), @@ -660,8 +662,10 @@ func sortFieldErrors() cmp.Option { }) } -const testGroup = "resources.test.com" -const testGroupSingular = "resource.test.com" +const ( + testGroup = "resources.test.com" + testGroupSingular = "resource.test.com" +) func marshalJSON(t *testing.T, obj interface{}) []byte { t.Helper() diff --git a/test/e2e/apiextensions_test.go b/test/e2e/apiextensions_test.go index 4d66b8561..038edd2a9 100644 --- a/test/e2e/apiextensions_test.go +++ b/test/e2e/apiextensions_test.go @@ -54,12 +54,10 @@ func init() { // extensions (i.e. Composition, XRDs, etc). const LabelAreaAPIExtensions = "apiextensions" -var ( - nopList = composed.NewList(composed.FromReferenceToList(corev1.ObjectReference{ - APIVersion: "nop.crossplane.io/v1alpha1", - Kind: "NopResource", - })) -) +var nopList = composed.NewList(composed.FromReferenceToList(corev1.ObjectReference{ + APIVersion: "nop.crossplane.io/v1alpha1", + Kind: "NopResource", +})) // TestCompositionMinimal tests Crossplane's Composition functionality, // checking that a claim using a very minimal Composition (with no patches, diff --git a/test/e2e/config/environment.go b/test/e2e/config/environment.go index b683650be..30be698be 100644 --- a/test/e2e/config/environment.go +++ b/test/e2e/config/environment.go @@ -189,7 +189,7 @@ func (e *Environment) HelmInstallBaseCrossplane() env.Func { } // getSuiteInstallOpts returns the helm install options for the specified -// suite, appending additional specified ones +// suite, appending additional specified ones. func (e *Environment) getSuiteInstallOpts(suite string, extra ...helm.Option) []helm.Option { p, ok := e.suites[suite] if !ok { @@ -238,7 +238,7 @@ func WithoutBaseDefaultTestSuite() TestSuiteOpt { } // WithLabelsToSelect sets the provided testSuite to include the provided -// labels, if not already specified by the user +// labels, if not already specified by the user. 
func WithLabelsToSelect(labels features.Labels) TestSuiteOpt { return func(suite *testSuite) { suite.labelsToSelect = labels diff --git a/test/e2e/environmentconfig_test.go b/test/e2e/environmentconfig_test.go index 959ea9c28..f69fd1661 100644 --- a/test/e2e/environmentconfig_test.go +++ b/test/e2e/environmentconfig_test.go @@ -324,6 +324,7 @@ func TestEnvironmentConfigMultipleMaxMatchNil(t *testing.T) { Feature(), ) } + func TestEnvironmentConfigMultipleMaxMatch1(t *testing.T) { subfolder := "multipleModeMaxMatch1" diff --git a/test/e2e/funcs/env.go b/test/e2e/funcs/env.go index e7aef5b51..7f620251d 100644 --- a/test/e2e/funcs/env.go +++ b/test/e2e/funcs/env.go @@ -76,7 +76,6 @@ func AsFeaturesFunc(fn env.Func) features.Func { } return ctx } - } // HelmUninstall uninstalls a Helm chart. @@ -115,7 +114,7 @@ func EnvFuncs(fns ...env.Func) env.Func { // CreateKindClusterWithConfig create kind cluster of the given name according to // configuration referred via configFilePath. -// The configuration is placed in test context afterward +// The configuration is placed in test context afterward. func CreateKindClusterWithConfig(clusterName, configFilePath string) env.Func { return EnvFuncs( envfuncs.CreateClusterWithConfig(kind.NewProvider(), clusterName, configFilePath), diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go index eceda36c1..90cbfe1e5 100644 --- a/test/e2e/funcs/feature.go +++ b/test/e2e/funcs/feature.go @@ -125,7 +125,6 @@ func DeploymentBecomesAvailableWithin(d time.Duration, namespace, name string) f // to exist within the supplied duration. func ResourcesCreatedWithin(d time.Duration, dir, pattern string) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { - rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) if err != nil { t.Error(err) @@ -171,7 +170,6 @@ func ResourceCreatedWithin(d time.Duration, o k8s.Object) features.Func { // within the supplied duration. func ResourcesDeletedWithin(d time.Duration, dir, pattern string) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { - rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) if err != nil { t.Error(err) @@ -315,7 +313,6 @@ var NotFound = notFound{} // duration. The supplied 'want' value must cmp.Equal the actual value. func ResourcesHaveFieldValueWithin(d time.Duration, dir, pattern, path string, want any) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { - rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) if err != nil { t.Error(err) @@ -432,7 +429,7 @@ func ApplyResources(manager, dir, pattern string, options ...decoder.DecodeOptio type claimCtxKey struct{} // ApplyClaim applies the claim stored in the given folder and file -// and stores it in the test context for later retrival if needed +// and stores it in the test context for later retrival if needed. func ApplyClaim(manager, dir, cm string) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { dfs := os.DirFS(dir) @@ -541,7 +538,7 @@ func DeleteResources(dir, pattern string) features.Func { } // ClaimUnderTestMustNotChangeWithin asserts that the claim available in -// the test context does not change within the given time +// the test context does not change within the given time. 
func ClaimUnderTestMustNotChangeWithin(d time.Duration) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { cm, ok := ctx.Value(claimCtxKey{}).(*claim.Unstructured) @@ -574,7 +571,7 @@ func ClaimUnderTestMustNotChangeWithin(d time.Duration) features.Func { } // CompositeUnderTestMustNotChangeWithin asserts that the claim available in -// the test context does not change within the given time +// the test context does not change within the given time. func CompositeUnderTestMustNotChangeWithin(d time.Duration) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { cm, ok := ctx.Value(claimCtxKey{}).(*claim.Unstructured) @@ -619,7 +616,7 @@ func CompositeUnderTestMustNotChangeWithin(d time.Duration) features.Func { } // CompositeResourceMustMatchWithin assert that a composite referred by the given file -// must be matched by the given function within the given timeout +// must be matched by the given function within the given timeout. func CompositeResourceMustMatchWithin(d time.Duration, dir, claimFile string, match func(xr *composite.Unstructured) bool) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { cm := &claim.Unstructured{} diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go index 47803c775..14fd5a4b6 100644 --- a/test/e2e/main_test.go +++ b/test/e2e/main_test.go @@ -37,29 +37,27 @@ import ( "github.com/crossplane/crossplane/test/e2e/funcs" ) -// TODO(phisco): make it configurable +// TODO(phisco): make it configurable. const namespace = "crossplane-system" -// TODO(phisco): make it configurable +// TODO(phisco): make it configurable. const crdsDir = "cluster/crds" // The caller (e.g. make e2e) must ensure these exist. -// Run `make build e2e-tag-images` to produce them +// Run `make build e2e-tag-images` to produce them. const ( - // TODO(phisco): make it configurable + // TODO(phisco): make it configurable. imgcore = "crossplane-e2e/crossplane:latest" ) const ( - // TODO(phisco): make it configurable + // TODO(phisco): make it configurable. helmChartDir = "cluster/charts/crossplane" - // TODO(phisco): make it configurable + // TODO(phisco): make it configurable. helmReleaseName = "crossplane" ) -var ( - environment = config.NewEnvironmentFromFlags() -) +var environment = config.NewEnvironmentFromFlags() func TestMain(m *testing.M) { // TODO(negz): Global loggers are dumb and klog is dumb. Remove this when diff --git a/test/e2e/utils/cert.go b/test/e2e/utils/cert.go index d327fe80c..ff49f1e30 100644 --- a/test/e2e/utils/cert.go +++ b/test/e2e/utils/cert.go @@ -27,7 +27,7 @@ import ( ) // CreateCert create TLS certificate for given dns name -// and returns CA and key in PEM format, or an error +// and returns CA and key in PEM format, or an error. func CreateCert(dnsName string) (string, string, error) { ca := &x509.Certificate{ SerialNumber: big.NewInt(2019), From 8dd49cadb0c2b7d948e36720df708f487679083e Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 18:11:43 -0800 Subject: [PATCH 017/370] Disable some more deprecated linters Signed-off-by: Nic Cope --- .golangci.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 15e7bbb6a..71a266429 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -112,13 +112,18 @@ linters: enable-all: true disable: - # These linters are all deprecated as of golangci-lint v1.49.0. 
We disable - # them explicitly to avoid the linter logging deprecation warnings. + # These linters are all deprecated. We disable them explicitly to avoid the + # linter logging deprecation warnings. - deadcode - varcheck - scopelint - structcheck - interfacer + - exhaustivestruct + - ifshort + - golint + - maligned + - nosnakecase fast: false From 18f66d7db85fd5b3db8df8d048cde4e357f44e23 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 18:38:21 -0800 Subject: [PATCH 018/370] Categorize linters we want and linters we don't This takes all linters that are currnently returning warnings about our codebase and puts them in one of two categories. The first are linters we'd like to enable, but that will require updates for compliance. The second are linters we don't want to enable. Signed-off-by: Nic Cope --- .golangci.yml | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index 71a266429..68adc2279 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -125,6 +125,87 @@ linters: - maligned - nosnakecase + # These are linters we'd like to enable, but that will be labor intensive to + # make existing code compliant. + - wrapcheck + - varnamelen + - thelper + - testpackage + - tagliatelle + - stylecheck + - paralleltest + - nonamedreturns + - wastedassign + - predeclared + - ireturn + - nilnil + - inamedparam + - gomnd + - interfacebloat + - gochecknoglobals + - forcetypeassert + - containedctx + - dupword + - depguard # TODO(negz): Use this to avoid e.g. stretchr/testify. + - gochecknoinits + - gocognit # TODO(negz): Consider switching to this over gocyclo. + - forbidigo + + # Below are linters that lint for things we don't value. Each entry below + # this line must have a comment explaining the rationale. + + # These linters add whitespace in an attempt to make code more readable. + # This isn't a widely accepted Go best practice, and would be laborious to + # apply to existing code. + - wsl + - nlreturn + + # Warns about uses of fmt.Sprintf that are less performant than alternatives + # such as string concatenation. We value readability more than performance + # unless performance is measured to be an issue. + - perfsprint + + # This linter: + # + # 1. Requires errors.Is/errors.As to test equality. + # 2. Requires all errors be wrapped with fmt.Errorf specifically. + # 3. Disallows errors.New inline - requires package level errors. + # + # 1 is covered by other linters. 2 is covered by wrapcheck, which can also + # handle our use of crossplane-runtime's errors package. 3 is more strict + # than we need. Not every error needs to be tested for equality. + - goerr113 + + # These linters duplicate gocyclo, but calculate complexity differently. + - cyclop + - nestif + - funlen + - maintidx + + # Enforces max line length. It's not idiomatic to enforce a strict limit on + # line length in Go. We'd prefer to lint for things that often cause long + # lines, like functions with too many parameters or long parameter names + # that duplicate their types. + - lll + + # Warns about struct instantiations that don't specify every field. Could be + # useful in theory to catch fields that are accidentally omitted. Seems like + # it would have many more false positives than useful catches, though. + - exhaustruct + + # Warns about TODO comments. The rationale being they should be issues + # instead. We're okay with using TODO to track minor cleanups for next time + # we touch a particular file. 
+ - godox + + # Warns about duplicated code blocks within the same file. Could be useful + # to prompt folks to think about whether code should be broken out into a + # function, but generally we're less worried about DRY and fine with a + # little copying. We don't want to give folks the impression that we require + # every duplicated code block to be factored out into a function. + - dupl + + fast: false From d954ec6b26fc0a2317229b8bc00979da17001ca4 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 20:18:04 -0800 Subject: [PATCH 019/370] Enable forbidigo linter With its default configuration this linter just finds fmt.Print* calls, which are usually debug statements left in by accident. We had a few of these where we instead should have been using t.Log or k.Stdout. Signed-off-by: Nic Cope --- .golangci.yml | 1 - cmd/crank/beta/top/top.go | 4 ++-- test/e2e/funcs/collect.go | 18 +++++++++--------- test/e2e/funcs/feature.go | 4 ++-- 4 files changed, 13 insertions(+), 14 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 68adc2279..c883b3d5c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -149,7 +149,6 @@ linters: - depguard # TODO(negz): Use this to avoid e.g. stretchr/testify. - gochecknoinits - gocognit # TODO(negz): Consider switching to this over gocyclo. - - forbidigo # Below are linters that lint for things we don't value. Each entry below # this line must have a comment explaining the rationale. diff --git a/cmd/crank/beta/top/top.go b/cmd/crank/beta/top/top.go index bef14f993..b5ff854d2 100644 --- a/cmd/crank/beta/top/top.go +++ b/cmd/crank/beta/top/top.go @@ -138,7 +138,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc logger.Debug("Fetched all Crossplane pods", "pods", crossplanePods, "namespace", c.Namespace) if len(crossplanePods) == 0 { - fmt.Println("No Crossplane pods found in the namespace", c.Namespace) + fmt.Fprintln(k.Stdout, "No Crossplane pods found in the namespace", c.Namespace) return nil } @@ -172,7 +172,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc if c.Summary { printPodsSummary(k.Stdout, crossplanePods) logger.Debug("Printed pods summary") - fmt.Println() + fmt.Fprintln(k.Stdout) } if err := printPodsTable(k.Stdout, crossplanePods); err != nil { diff --git a/test/e2e/funcs/collect.go b/test/e2e/funcs/collect.go index 2db730761..294af6d41 100644 --- a/test/e2e/funcs/collect.go +++ b/test/e2e/funcs/collect.go @@ -18,8 +18,8 @@ package funcs import ( "context" - "fmt" "strings" + "testing" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -51,7 +51,7 @@ type coordinate struct { // // Note: this is a pretty expensive operation only suited for e2e tests with // small clusters. 
-func buildRelatedObjectGraph(ctx context.Context, discoveryClient discovery.DiscoveryInterface, client dynamic.Interface, mapper meta.RESTMapper) (map[coordinate][]coordinate, error) { +func buildRelatedObjectGraph(ctx context.Context, t *testing.T, discoveryClient discovery.DiscoveryInterface, client dynamic.Interface, mapper meta.RESTMapper) (map[coordinate][]coordinate, error) { // Discover all resource types resourceLists, err := discoveryClient.ServerPreferredResources() if err != nil { @@ -110,7 +110,7 @@ func buildRelatedObjectGraph(ctx context.Context, discoveryClient discovery.Disc group, version := parseAPIVersion(ref.APIVersion) rm, err := mapper.RESTMapping(schema.GroupKind{Group: group, Kind: ref.Kind}, version) if err != nil { - fmt.Printf("cannot find REST mapping for %v: %v\n", ref, err) + t.Logf("cannot find REST mapping for %v: %v\n", ref, err) continue } owner := coordinate{ @@ -141,7 +141,7 @@ func parseAPIVersion(apiVersion string) (group, version string) { // RelatedObjects returns all objects related to the supplied object through // ownership, i.e. the returned objects are transitively owned by obj, or // resource reference. -func RelatedObjects(ctx context.Context, config *rest.Config, objs ...client.Object) ([]client.Object, error) { +func RelatedObjects(ctx context.Context, t *testing.T, config *rest.Config, objs ...client.Object) ([]client.Object, error) { dynClient, err := dynamic.NewForConfig(config) if err != nil { return nil, err @@ -159,7 +159,7 @@ func RelatedObjects(ctx context.Context, config *rest.Config, objs ...client.Obj return nil, err } - ownershipGraph, err := buildRelatedObjectGraph(ctx, discoveryClient, dynClient, mapper) + ownershipGraph, err := buildRelatedObjectGraph(ctx, t, discoveryClient, dynClient, mapper) if err != nil { return nil, errors.Wrap(err, "cannot build ownership graph") } @@ -170,7 +170,7 @@ func RelatedObjects(ctx context.Context, config *rest.Config, objs ...client.Obj gvk := obj.GetObjectKind().GroupVersionKind() rm, err := mapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) if err != nil { - fmt.Printf("cannot find REST mapping for %s: %v\n", gvk, err) + t.Logf("cannot find REST mapping for %s: %v\n", gvk, err) continue } @@ -181,15 +181,15 @@ func RelatedObjects(ctx context.Context, config *rest.Config, objs ...client.Obj }, seen)...) 
}
 
-	return loadCoordinates(ctx, dynClient, coords), nil
+	return loadCoordinates(ctx, t, dynClient, coords), nil
 }
 
-func loadCoordinates(ctx context.Context, dynClient dynamic.Interface, coords []coordinate) []client.Object {
+func loadCoordinates(ctx context.Context, t *testing.T, dynClient dynamic.Interface, coords []coordinate) []client.Object {
 	ret := make([]client.Object, 0, len(coords))
 	for _, coord := range coords {
 		other, err := dynClient.Resource(coord.GroupVersionResource).Namespace(coord.Namespace).Get(ctx, coord.Name, metav1.GetOptions{})
 		if err != nil {
-			fmt.Printf("cannot get %v: %v\n", coord, err)
+			t.Logf("cannot get %v: %v\n", coord, err)
 			continue
 		}
 		ret = append(ret, other)
diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go
index 90cbfe1e5..d95ae4246 100644
--- a/test/e2e/funcs/feature.go
+++ b/test/e2e/funcs/feature.go
@@ -186,7 +186,7 @@ func ResourcesDeletedWithin(d time.Duration, dir, pattern string) features.Func
 		start := time.Now()
 		if err := wait.For(conditions.New(c.Client().Resources()).ResourcesDeleted(list), wait.WithTimeout(d), wait.WithInterval(DefaultPollInterval)); err != nil {
 			objs := itemsToObjects(list.Items)
-			related, _ := RelatedObjects(ctx, c.Client().RESTConfig(), objs...)
+			related, _ := RelatedObjects(ctx, t, c.Client().RESTConfig(), objs...)
 			events := valueOrError(eventString(ctx, c.Client().RESTConfig(), append(objs, related...)...))
 
 			t.Errorf("resources not deleted: %v:\n\n%s\n%s\nRelated objects:\n\n%s\n", err, toYAML(objs...), events, toYAML(related...))
@@ -269,7 +269,7 @@ func ResourcesHaveConditionWithin(d time.Duration, dir, pattern string, cds ...x
 		start := time.Now()
 		if err := wait.For(conditions.New(c.Client().Resources()).ResourcesMatch(list, match), wait.WithTimeout(d), wait.WithInterval(DefaultPollInterval)); err != nil {
 			objs := itemsToObjects(list.Items)
-			related, _ := RelatedObjects(ctx, c.Client().RESTConfig(), objs...)
+			related, _ := RelatedObjects(ctx, t, c.Client().RESTConfig(), objs...)
 			events := valueOrError(eventString(ctx, c.Client().RESTConfig(), append(objs, related...)...))
 
 			t.Errorf("resources did not have desired conditions: %s: %v:\n\n%s\n%s\nRelated objects:\n\n%s\n", desired, err, toYAML(objs...), events, toYAML(related...))

From d5dc0aa3813fe8fae172491a48c03f48e0227700 Mon Sep 17 00:00:00 2001
From: Nic Cope
Date: Sun, 18 Feb 2024 20:18:04 -0800
Subject: [PATCH 020/370] Remove impossible nil condition

The 'nilness' linter notices this in my editor but not in make reviewable
for some reason.

Err can never be non-nil here, because we check err for nilness before the
for loop. The err declaration inside the for loop is scoped to that loop.

Signed-off-by: Nic Cope
---
 cmd/crank/beta/top/top.go | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/cmd/crank/beta/top/top.go b/cmd/crank/beta/top/top.go
index b5ff854d2..170858269 100644
--- a/cmd/crank/beta/top/top.go
+++ b/cmd/crank/beta/top/top.go
@@ -157,9 +157,6 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc
 		}
 	}
 
-	if err != nil {
-		return errors.Wrap(err, errGetPodMetrics)
-	}
 	logger.Debug("Added metrics to Crossplane pods")
 
 	sort.Slice(crossplanePods, func(i, j int) bool {

From 18f66d7db85fd5b3db8df8d048cde4e357f44e23 Mon Sep 17 00:00:00 2001
From: Nic Cope
Date: Sun, 18 Feb 2024 20:35:17 -0800
Subject: [PATCH 021/370] Switch from gocyclo to gocognit

They do the same thing, but calculate cognitive complexity differently.
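For illustration, a minimal, hypothetical sketch (the functions below are
invented for this example, not taken from the codebase) of how the two
linters treat the same amount of branching:

    package example

    // gocyclo counts every case as a separate branch, so this switch scores
    // about the same as the nested conditionals below. gocognit counts the
    // whole switch once, so it considers this function barely complex.
    func kind(s string) string {
    	switch s {
    	case "provider":
    		return "Provider"
    	case "configuration":
    		return "Configuration"
    	case "function":
    		return "Function"
    	default:
    		return "Unknown"
    	}
    }

    // gocognit adds an increment for every level of nesting, so it scores this
    // function noticeably higher than the switch above, even though gocyclo
    // sees a similar number of branches in both.
    func kindNested(s string, beta, enabled bool) string {
    	if s == "provider" {
    		if beta {
    			if enabled {
    				return "BetaProvider"
    			}
    			return "DisabledBetaProvider"
    		}
    		return "Provider"
    	}
    	return "Unknown"
    }
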
Notably gocognit considers simple switch statements less complex, but nested conditionals more complex. This removes the need for a lot of //nolint:gocyclo comments that we had just for long switches. I notice nolintlint seems to automatically be removing these no-op //nolint comments now too, which is great. Signed-off-by: Nic Cope --- .golangci.yml | 16 ++++------------ apis/apiextensions/v1/composition_common.go | 2 +- apis/apiextensions/v1/composition_transforms.go | 4 ---- .../v1beta1/zz_generated.composition_common.go | 2 +- .../zz_generated.composition_transforms.go | 4 ---- .../beta/convert/deploymentruntime/converter.go | 6 +++--- cmd/crank/beta/render/cmd.go | 2 +- cmd/crank/beta/render/render.go | 4 ++-- cmd/crank/beta/render/runtime_docker.go | 2 +- cmd/crank/beta/top/top.go | 2 +- cmd/crank/beta/trace/internal/printer/default.go | 4 ++-- cmd/crank/beta/trace/trace.go | 2 +- cmd/crank/beta/validate/cmd.go | 2 +- cmd/crank/beta/validate/manager.go | 2 +- cmd/crank/beta/validate/validate.go | 2 +- cmd/crank/beta/xpkg/init.go | 2 +- cmd/crank/xpkg/install.go | 2 +- cmd/crank/xpkg/login.go | 2 +- cmd/crank/xpkg/push.go | 2 +- cmd/crossplane/core/core.go | 2 +- .../controller/apiextensions/claim/reconciler.go | 2 +- .../controller/apiextensions/claim/syncer_csa.go | 2 +- .../controller/apiextensions/claim/syncer_ssa.go | 2 +- .../composite/composition_functions.go | 2 +- .../apiextensions/composite/composition_pt.go | 4 ++-- .../composite/composition_transforms.go | 4 ++-- .../apiextensions/composite/connection.go | 2 +- .../composite/environment_selector.go | 4 ++-- .../controller/apiextensions/composite/ready.go | 2 -- .../apiextensions/composite/reconciler.go | 2 +- .../apiextensions/composition/reconciler.go | 2 +- .../apiextensions/definition/composed.go | 2 +- .../apiextensions/definition/reconciler.go | 2 +- .../apiextensions/offered/reconciler.go | 2 +- .../controller/apiextensions/usage/reconciler.go | 2 +- internal/controller/pkg/manager/reconciler.go | 2 +- internal/controller/pkg/resolver/reconciler.go | 2 +- internal/controller/pkg/revision/dependency.go | 2 +- internal/controller/pkg/revision/establisher.go | 8 ++++---- internal/controller/pkg/revision/imageback.go | 2 +- internal/controller/pkg/revision/reconciler.go | 2 +- .../pkg/revision/runtime_override_options.go | 2 +- internal/controller/pkg/revision/watch.go | 4 ++-- .../rbac/provider/binding/reconciler.go | 2 +- .../controller/rbac/provider/roles/reconciler.go | 2 +- .../controller/rbac/provider/roles/requests.go | 2 +- internal/initializer/crds.go | 2 +- internal/initializer/crds_migrator.go | 2 +- internal/initializer/installer.go | 2 +- internal/initializer/webhook_configurations.go | 2 +- .../apiextensions/v1/composition/handler.go | 2 +- internal/xpkg/build.go | 2 +- internal/xpkg/name.go | 2 +- internal/xpkg/upbound/context.go | 2 +- .../apiextensions/v1/composition/patches.go | 6 ++---- test/e2e/funcs/feature.go | 4 ++-- 56 files changed, 69 insertions(+), 89 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c883b3d5c..ce097a9d9 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -41,10 +41,6 @@ linters-settings: - blank - dot - gocyclo: - # minimal code complexity to report, 30 by default (but we recommend 10-20) - min-complexity: 10 - maligned: # print struct with more effective memory layout or not, false by default suggest-new: true @@ -110,6 +106,7 @@ linters-settings: linters: enable-all: true + fast: false disable: # These linters are all deprecated. 
We disable them explicitly to avoid the @@ -148,7 +145,6 @@ linters: - dupword - depguard # TODO(negz): Use this to avoid e.g. stretchr/testify. - gochecknoinits - - gocognit # TODO(negz): Consider switching to this over gocyclo. # Below are linters that lint for things we don't value. Each entry below # this line must have a comment explaining the rationale. @@ -175,7 +171,8 @@ linters: # than we need. Not every error needs to be tested for equality. - goerr113 - # These linters duplicate gocyclo, but calculate complexity differently. + # These linters duplicate gocognit, but calculate complexity differently. + - gocyclo - cyclop - nestif - funlen @@ -204,19 +201,14 @@ linters: # every duplicated code block to be factored out into a function. - dupl - - fast: false - - issues: # Excluding configuration per-path and per-linter exclude-rules: # Exclude some linters from running on tests files. - path: _test(ing)?\.go linters: - - gocyclo + - gocognit - errcheck - - dupl - gosec - scopelint - unparam diff --git a/apis/apiextensions/v1/composition_common.go b/apis/apiextensions/v1/composition_common.go index 624b9d406..14982abd1 100644 --- a/apis/apiextensions/v1/composition_common.go +++ b/apis/apiextensions/v1/composition_common.go @@ -200,7 +200,7 @@ func (m *MatchConditionReadinessCheck) Validate() *field.Error { } // Validate checks if the readiness check is logically valid. -func (r *ReadinessCheck) Validate() *field.Error { //nolint:gocyclo // This function is not that complex, just a switch +func (r *ReadinessCheck) Validate() *field.Error { if !r.Type.IsValid() { return field.Invalid(field.NewPath("type"), string(r.Type), "unknown readiness check type") } diff --git a/apis/apiextensions/v1/composition_transforms.go b/apis/apiextensions/v1/composition_transforms.go index 258e380ed..6a31c0204 100644 --- a/apis/apiextensions/v1/composition_transforms.go +++ b/apis/apiextensions/v1/composition_transforms.go @@ -73,8 +73,6 @@ type Transform struct { } // Validate this Transform is valid. -// -//nolint:gocyclo // This is a long but simple/same-y switch. func (t *Transform) Validate() *field.Error { switch t.Type { case TransformTypeMath: @@ -394,8 +392,6 @@ type StringTransform struct { } // Validate checks this StringTransform is valid. -// -//nolint:gocyclo // just a switch func (s *StringTransform) Validate() *field.Error { switch s.Type { case StringTransformTypeFormat, "": diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_common.go b/apis/apiextensions/v1beta1/zz_generated.composition_common.go index 20aada02d..edc389696 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_common.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_common.go @@ -202,7 +202,7 @@ func (m *MatchConditionReadinessCheck) Validate() *field.Error { } // Validate checks if the readiness check is logically valid. 
-func (r *ReadinessCheck) Validate() *field.Error { //nolint:gocyclo // This function is not that complex, just a switch +func (r *ReadinessCheck) Validate() *field.Error { if !r.Type.IsValid() { return field.Invalid(field.NewPath("type"), string(r.Type), "unknown readiness check type") } diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go b/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go index b73c57c46..6c6847c2f 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go @@ -75,8 +75,6 @@ type Transform struct { } // Validate this Transform is valid. -// -//nolint:gocyclo // This is a long but simple/same-y switch. func (t *Transform) Validate() *field.Error { switch t.Type { case TransformTypeMath: @@ -396,8 +394,6 @@ type StringTransform struct { } // Validate checks this StringTransform is valid. -// -//nolint:gocyclo // just a switch func (s *StringTransform) Validate() *field.Error { switch s.Type { case StringTransformTypeFormat, "": diff --git a/cmd/crank/beta/convert/deploymentruntime/converter.go b/cmd/crank/beta/convert/deploymentruntime/converter.go index 19f7fcf99..ea26129ac 100644 --- a/cmd/crank/beta/convert/deploymentruntime/converter.go +++ b/cmd/crank/beta/convert/deploymentruntime/converter.go @@ -57,7 +57,7 @@ func controllerConfigToDeploymentRuntimeConfig(cc *v1alpha1.ControllerConfig) (* return drc, nil } -func deploymentTemplateFromControllerConfig(cc *v1alpha1.ControllerConfig) *v1beta1.DeploymentTemplate { //nolint:gocyclo // Just a lot of if, then set field +func deploymentTemplateFromControllerConfig(cc *v1alpha1.ControllerConfig) *v1beta1.DeploymentTemplate { if cc == nil || !shouldCreateDeploymentTemplate(cc) { return nil } @@ -135,7 +135,7 @@ func deploymentTemplateFromControllerConfig(cc *v1alpha1.ControllerConfig) *v1be return dt } -func containerFromControllerConfig(cc *v1alpha1.ControllerConfig) *corev1.Container { //nolint:gocyclo // Just a lot of if, then set field +func containerFromControllerConfig(cc *v1alpha1.ControllerConfig) *corev1.Container { if cc == nil || !shouldCreateDeploymentTemplateContainer(cc) { return nil } @@ -235,7 +235,7 @@ func withDeploymentTemplate(dt *v1beta1.DeploymentTemplate) func(*v1beta1.Deploy // shouldCreateDeploymentTemplate determines whether we should create a deployment // template in the DeploymentRuntimeConfig. -func shouldCreateDeploymentTemplate(cc *v1alpha1.ControllerConfig) bool { //nolint:gocyclo // There are a lot of triggers for this, but it's not complex +func shouldCreateDeploymentTemplate(cc *v1alpha1.ControllerConfig) bool { return len(cc.Labels) > 0 || len(cc.Annotations) > 0 || cc.Spec.Metadata != nil || diff --git a/cmd/crank/beta/render/cmd.go b/cmd/crank/beta/render/cmd.go index 7baf1eda0..841f49062 100644 --- a/cmd/crank/beta/render/cmd.go +++ b/cmd/crank/beta/render/cmd.go @@ -119,7 +119,7 @@ func (c *Cmd) AfterApply() error { } // Run render. -func (c *Cmd) Run(k *kong.Context, log logging.Logger) error { //nolint:gocyclo // Only a touch over. +func (c *Cmd) Run(k *kong.Context, log logging.Logger) error { //nolint:gocognit // Only a touch over. 
xr, err := LoadCompositeResource(c.fs, c.CompositeResource) if err != nil { return errors.Wrapf(err, "cannot load composite resource from %q", c.CompositeResource) diff --git a/cmd/crank/beta/render/render.go b/cmd/crank/beta/render/render.go index 1cd88b4ac..37268302e 100644 --- a/cmd/crank/beta/render/render.go +++ b/cmd/crank/beta/render/render.go @@ -89,7 +89,7 @@ type Outputs struct { } // Render the desired XR and composed resources, sorted by resource name, given the supplied inputs. -func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) { //nolint:gocyclo // TODO(negz): Should we refactor to break this up a bit? +func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) { //nolint:gocognit // TODO(negz): Should we refactor to break this up a bit? // Run our Functions. conns := map[string]*grpc.ClientConn{} for _, fn := range in.Functions { @@ -329,7 +329,7 @@ func SetComposedResourceMetadata(cd resource.Object, xr resource.Composite, name return errors.Wrapf(meta.AddControllerReference(cd, or), "cannot set composite resource %q as controller ref of composed resource", xr.GetName()) } -func filterExtraResources(ers []unstructured.Unstructured, selector *fnv1beta1.ResourceSelector) (*fnv1beta1.Resources, error) { //nolint:gocyclo // There is not much to simplify here. +func filterExtraResources(ers []unstructured.Unstructured, selector *fnv1beta1.ResourceSelector) (*fnv1beta1.Resources, error) { if len(ers) == 0 || selector == nil { return nil, nil } diff --git a/cmd/crank/beta/render/runtime_docker.go b/cmd/crank/beta/render/runtime_docker.go index cb07e1832..c096e216b 100644 --- a/cmd/crank/beta/render/runtime_docker.go +++ b/cmd/crank/beta/render/runtime_docker.go @@ -154,7 +154,7 @@ func GetRuntimeDocker(fn pkgv1beta1.Function, log logging.Logger) (*RuntimeDocke var _ Runtime = &RuntimeDocker{} // Start a Function as a Docker container. -func (r *RuntimeDocker) Start(ctx context.Context) (RuntimeContext, error) { //nolint:gocyclo // TODO(phisco): Refactor to break this up a bit, not so easy. +func (r *RuntimeDocker) Start(ctx context.Context) (RuntimeContext, error) { r.log.Debug("Starting Docker container runtime", "image", r.Image) c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) if err != nil { diff --git a/cmd/crank/beta/top/top.go b/cmd/crank/beta/top/top.go index 170858269..60a224beb 100644 --- a/cmd/crank/beta/top/top.go +++ b/cmd/crank/beta/top/top.go @@ -101,7 +101,7 @@ func (r *defaultPrinterRow) String() string { } // Run runs the top command. -func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyclo // TODO:(piotr1215) refactor to use dedicated functions +func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { logger = logger.WithValues("cmd", "top") logger.Debug("Tabwriter header created") diff --git a/cmd/crank/beta/trace/internal/printer/default.go b/cmd/crank/beta/trace/internal/printer/default.go index 17322e9e4..3641e9e56 100644 --- a/cmd/crank/beta/trace/internal/printer/default.go +++ b/cmd/crank/beta/trace/internal/printer/default.go @@ -211,7 +211,7 @@ func (p *DefaultPrinter) Print(w io.Writer, root *resource.Resource) error { // getResourceStatus returns a string that represents an entire row of status // information for the resource. 
-func getResourceStatus(r *resource.Resource, name string, wide bool) fmt.Stringer { //nolint:gocyclo // NOTE(phisco): just a few switches, not much to do here +func getResourceStatus(r *resource.Resource, name string, wide bool) fmt.Stringer { readyCond := r.GetCondition(xpv1.TypeReady) syncedCond := r.GetCondition(xpv1.TypeSynced) var status, m string @@ -262,7 +262,7 @@ func getResourceStatus(r *resource.Resource, name string, wide bool) fmt.Stringe } } -func getPkgResourceStatus(r *resource.Resource, name string, wide bool) fmt.Stringer { //nolint:gocyclo // TODO: just a few switches, not much to do here +func getPkgResourceStatus(r *resource.Resource, name string, wide bool) fmt.Stringer { var err error var packageImg, state, status, m string diff --git a/cmd/crank/beta/trace/trace.go b/cmd/crank/beta/trace/trace.go index 6ad668ae2..b113aeb41 100644 --- a/cmd/crank/beta/trace/trace.go +++ b/cmd/crank/beta/trace/trace.go @@ -102,7 +102,7 @@ Examples: } // Run runs the trace command. -func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyclo // TODO(phisco): refactor +func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { ctx := context.Background() logger = logger.WithValues("Resource", c.Resource, "Name", c.Name) diff --git a/cmd/crank/beta/validate/cmd.go b/cmd/crank/beta/validate/cmd.go index 161d1866b..1e376fdd8 100644 --- a/cmd/crank/beta/validate/cmd.go +++ b/cmd/crank/beta/validate/cmd.go @@ -81,7 +81,7 @@ func (c *Cmd) AfterApply() error { } // Run validate. -func (c *Cmd) Run(k *kong.Context, _ logging.Logger) error { //nolint:gocyclo // stdin check makes it over the top +func (c *Cmd) Run(k *kong.Context, _ logging.Logger) error { if c.Resources == "-" && c.Extensions == "-" { return errors.New("cannot use stdin for both extensions and resources") } diff --git a/cmd/crank/beta/validate/manager.go b/cmd/crank/beta/validate/manager.go index 84a2f49c4..c3b4b081a 100644 --- a/cmd/crank/beta/validate/manager.go +++ b/cmd/crank/beta/validate/manager.go @@ -74,7 +74,7 @@ func NewManager(cacheDir string, fs afero.Fs, w io.Writer) *Manager { } // PrepExtensions converts the unstructured XRDs/CRDs to CRDs and extract package images to add as a dependency. -func (m *Manager) PrepExtensions(extensions []*unstructured.Unstructured) error { //nolint:gocyclo // the function itself is not that complex, it just has different cases +func (m *Manager) PrepExtensions(extensions []*unstructured.Unstructured) error { //nolint:gocognit // the function itself is not that complex, it just has different cases for _, e := range extensions { switch e.GroupVersionKind().GroupKind() { case schema.GroupKind{Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"}: diff --git a/cmd/crank/beta/validate/validate.go b/cmd/crank/beta/validate/validate.go index a618a7509..2716d1bf2 100644 --- a/cmd/crank/beta/validate/validate.go +++ b/cmd/crank/beta/validate/validate.go @@ -91,7 +91,7 @@ func newValidatorsAndStructurals(crds []*extv1.CustomResourceDefinition) (map[ru } // SchemaValidation validates the resources against the given CRDs. 
-func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.CustomResourceDefinition, skipSuccessLogs bool, w io.Writer) error { //nolint:gocyclo // printing the output increases the cyclomatic complexity a little bit +func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.CustomResourceDefinition, skipSuccessLogs bool, w io.Writer) error { //nolint:gocognit // printing the output increases the cyclomatic complexity a little bit schemaValidators, structurals, err := newValidatorsAndStructurals(crds) if err != nil { return errors.Wrap(err, "cannot create schema validators") diff --git a/cmd/crank/beta/xpkg/init.go b/cmd/crank/beta/xpkg/init.go index 257cf4599..b8f6d57c6 100644 --- a/cmd/crank/beta/xpkg/init.go +++ b/cmd/crank/beta/xpkg/init.go @@ -102,7 +102,7 @@ Examples: return fmt.Sprintf(tpl, b.String()) } -func (c *initCmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyclo // file check switch and print error check make it over the top +func (c *initCmd) Run(k *kong.Context, logger logging.Logger) error { f, err := os.Stat(c.Directory) switch { case err == nil && !f.IsDir(): diff --git a/cmd/crank/xpkg/install.go b/cmd/crank/xpkg/install.go index bcadf9b01..1839cb7fb 100644 --- a/cmd/crank/xpkg/install.go +++ b/cmd/crank/xpkg/install.go @@ -84,7 +84,7 @@ Examples: } // Run the package install cmd. -func (c *installCmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyclo // TODO(negz): Can anything be broken out here? +func (c *installCmd) Run(k *kong.Context, logger logging.Logger) error { pkgName := c.Name if pkgName == "" { ref, err := name.ParseReference(c.Package, name.WithDefaultRegistry(xpkg.DefaultRegistry)) diff --git a/cmd/crank/xpkg/login.go b/cmd/crank/xpkg/login.go index 614bbc446..f51a9275d 100644 --- a/cmd/crank/xpkg/login.go +++ b/cmd/crank/xpkg/login.go @@ -100,7 +100,7 @@ func (c *loginCmd) AfterApply(kongCtx *kong.Context) error { } // Run executes the login command. -func (c *loginCmd) Run(k *kong.Context, upCtx *upbound.Context) error { //nolint:gocyclo // TODO(phisco): refactor +func (c *loginCmd) Run(k *kong.Context, upCtx *upbound.Context) error { auth, profType, err := constructAuth(c.Username, c.Token, c.Password) if err != nil { return errors.Wrap(err, "failed to construct auth") diff --git a/cmd/crank/xpkg/push.go b/cmd/crank/xpkg/push.go index 21870818f..a97832f40 100644 --- a/cmd/crank/xpkg/push.go +++ b/cmd/crank/xpkg/push.go @@ -94,7 +94,7 @@ func (c *pushCmd) AfterApply() error { } // Run runs the push cmd. -func (c *pushCmd) Run(logger logging.Logger) error { //nolint:gocyclo // This feels easier to read as-is. +func (c *pushCmd) Run(logger logging.Logger) error { //nolint:gocognit // This feels easier to read as-is. upCtx, err := upbound.NewFromFlags(c.Flags, upbound.AllowMissingProfile()) if err != nil { return err diff --git a/cmd/crossplane/core/core.go b/cmd/crossplane/core/core.go index 0781968c8..5196fa48c 100644 --- a/cmd/crossplane/core/core.go +++ b/cmd/crossplane/core/core.go @@ -123,7 +123,7 @@ type startCommand struct { } // Run core Crossplane controllers. -func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //nolint:gocyclo // Only slightly over. +func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //nolint:gocognit // Only slightly over. 
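The hunks above and below mostly swap `//nolint:gocyclo` for `//nolint:gocognit`, or drop the directive where it is no longer needed. As a rough illustration of why the swap changes which functions need a suppression (the sketch below is not from this patch, and the spec type is made up): gocognit scores cognitive complexity, which grows with nesting, while gocyclo counts branches wherever they sit, so guard clauses and small extracted helpers tend to bring a function back under the gocognit threshold without a comment.

```go
// Package sketch is illustrative only. It shows why a refactor can satisfy
// gocognit even though the number of branches (what gocyclo counts) is the
// same: gocognit penalises nesting.
package sketch

import "errors"

// spec is a made-up type used only for this example.
type spec struct {
    Name     string
    Replicas int
    Paused   bool
}

// validateNested piles up nested conditions. Each extra level of nesting
// increases the cognitive complexity score.
func validateNested(s *spec) error {
    if s != nil {
        if s.Name != "" {
            if s.Replicas > 0 {
                if !s.Paused {
                    return nil
                }
                return errors.New("spec is paused")
            }
            return errors.New("replicas must be positive")
        }
        return errors.New("name must be set")
    }
    return errors.New("spec is nil")
}

// validateFlat performs the same checks with guard clauses. The branch count
// is unchanged, but the nesting, and with it the cognitive complexity, is
// much lower.
func validateFlat(s *spec) error {
    if s == nil {
        return errors.New("spec is nil")
    }
    if s.Name == "" {
        return errors.New("name must be set")
    }
    if s.Replicas <= 0 {
        return errors.New("replicas must be positive")
    }
    if s.Paused {
        return errors.New("spec is paused")
    }
    return nil
}
```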
cfg, err := ctrl.GetConfig() if err != nil { return errors.Wrap(err, "cannot get config") diff --git a/internal/controller/apiextensions/claim/reconciler.go b/internal/controller/apiextensions/claim/reconciler.go index 6f3196bb3..d8836140c 100644 --- a/internal/controller/apiextensions/claim/reconciler.go +++ b/internal/controller/apiextensions/claim/reconciler.go @@ -321,7 +321,7 @@ func NewReconciler(m manager.Manager, of resource.CompositeClaimKind, with resou } // Reconcile a composite resource claim with a concrete composite resource. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Complexity is tough to avoid here. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Complexity is tough to avoid here. log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/apiextensions/claim/syncer_csa.go b/internal/controller/apiextensions/claim/syncer_csa.go index e342c31ca..47bbf4f65 100644 --- a/internal/controller/apiextensions/claim/syncer_csa.go +++ b/internal/controller/apiextensions/claim/syncer_csa.go @@ -67,7 +67,7 @@ func NewClientSideCompositeSyncer(c client.Client, ng names.NameGenerator) *Clie // Sync the supplied claim with the supplied composite resource (XR). Syncing // may involve creating and binding the XR. -func (s *ClientSideCompositeSyncer) Sync(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { //nolint:gocyclo // This complex process seems easier to follow in one long method. +func (s *ClientSideCompositeSyncer) Sync(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { // First we sync claim -> XR. // It's possible we're being asked to configure a statically provisioned XR. diff --git a/internal/controller/apiextensions/claim/syncer_ssa.go b/internal/controller/apiextensions/claim/syncer_ssa.go index 14554b7d8..18d4f405f 100644 --- a/internal/controller/apiextensions/claim/syncer_ssa.go +++ b/internal/controller/apiextensions/claim/syncer_ssa.go @@ -117,7 +117,7 @@ func NewServerSideCompositeSyncer(c client.Client, ng names.NameGenerator) *Serv // Sync the supplied claim with the supplied composite resource (XR). Syncing // may involve creating and binding the XR. -func (s *ServerSideCompositeSyncer) Sync(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { //nolint:gocyclo // This complex process seems easier to follow in one long method. +func (s *ServerSideCompositeSyncer) Sync(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { // First we sync claim -> XR. // Create an empty XR patch object. We'll use this object to ensure we only diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index c270858d5..4f1c6d2b6 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -243,7 +243,7 @@ func NewFunctionComposer(kube client.Client, r FunctionRunner, o ...FunctionComp } // Compose resources using the Functions pipeline. -func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { //nolint:gocyclo // We probably don't want any further abstraction for the sake of reduced complexity. 
+func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { //nolint:gocognit // We probably don't want any further abstraction for the sake of reduced complexity. // Observe our existing composed resources. We need to do this before we // render any P&T templates, so that we can make sure we use the same // composed resource names (as in, metadata.name) every time. We know what diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index 99aba85da..861bcf874 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -159,7 +159,7 @@ func NewPTComposer(kube client.Client, o ...PTComposerOption) *PTComposer { // 3. Apply all composed resources that rendered successfully. // 4. Observe the readiness and connection details of all composed resources // that rendered successfully. -func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { //nolint:gocyclo // Breaking this up doesn't seem worth yet more layers of abstraction. +func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { //nolint:gocognit // Breaking this up doesn't seem worth yet more layers of abstraction. // Inline PatchSets before composing resources. ct, err := ComposedTemplates(req.Revision.Spec.PatchSets, req.Revision.Spec.Resources) if err != nil { @@ -439,7 +439,7 @@ func NewGarbageCollectingAssociator(c client.Client) *GarbageCollectingAssociato } // AssociateTemplates with composed resources. -func (a *GarbageCollectingAssociator) AssociateTemplates(ctx context.Context, cr resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { //nolint:gocyclo // Only slightly over (13). +func (a *GarbageCollectingAssociator) AssociateTemplates(ctx context.Context, cr resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { templates := map[ResourceName]int{} for i, t := range ct { if t.Name == nil { diff --git a/internal/controller/apiextensions/composite/composition_transforms.go b/internal/controller/apiextensions/composite/composition_transforms.go index f49317cf3..0547a1e81 100644 --- a/internal/controller/apiextensions/composite/composition_transforms.go +++ b/internal/controller/apiextensions/composite/composition_transforms.go @@ -79,7 +79,7 @@ const ( ) // Resolve the supplied Transform. -func Resolve(t v1.Transform, input any) (any, error) { //nolint:gocyclo // This is a long but simple/same-y switch. +func Resolve(t v1.Transform, input any) (any, error) { var out any var err error @@ -283,7 +283,7 @@ func unmarshalJSON(j extv1.JSON, output *any) error { } // ResolveString resolves a String transform. -func ResolveString(t v1.StringTransform, input any) (string, error) { //nolint:gocyclo // This is a long but simple/same-y switch. 
+func ResolveString(t v1.StringTransform, input any) (string, error) { switch t.Type { case v1.StringTransformTypeFormat: if t.Format == nil { diff --git a/internal/controller/apiextensions/composite/connection.go b/internal/controller/apiextensions/composite/connection.go index e66484213..3eb1cca5a 100644 --- a/internal/controller/apiextensions/composite/connection.go +++ b/internal/controller/apiextensions/composite/connection.go @@ -196,7 +196,7 @@ func (fn ConnectionDetailsExtractorFn) ExtractConnection(cd resource.Composed, c // ExtractConnectionDetails extracts XR connection details from the supplied // composed resource. If no ExtractConfigs are supplied no connection details // will be returned. -func ExtractConnectionDetails(cd resource.Composed, data managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { //nolint:gocyclo // TODO(negz): Break extraction out from validation, like we do with readiness. +func ExtractConnectionDetails(cd resource.Composed, data managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { out := map[string][]byte{} for _, cfg := range cfg { if cfg.Name == "" { diff --git a/internal/controller/apiextensions/composite/environment_selector.go b/internal/controller/apiextensions/composite/environment_selector.go index 3d035f793..f5bc2991f 100644 --- a/internal/controller/apiextensions/composite/environment_selector.go +++ b/internal/controller/apiextensions/composite/environment_selector.go @@ -135,7 +135,7 @@ func (s *APIEnvironmentSelector) lookUpConfigs(ctx context.Context, cr resource. return res, nil } -func (s *APIEnvironmentSelector) buildEnvironmentConfigRefFromSelector(cl *v1alpha1.EnvironmentConfigList, selector *v1.EnvironmentSourceSelector) ([]corev1.ObjectReference, error) { //nolint:gocyclo // TODO: refactor +func (s *APIEnvironmentSelector) buildEnvironmentConfigRefFromSelector(cl *v1alpha1.EnvironmentConfigList, selector *v1.EnvironmentSourceSelector) ([]corev1.ObjectReference, error) { ec := make([]v1alpha1.EnvironmentConfig, 0) if cl == nil { @@ -184,7 +184,7 @@ func (s *APIEnvironmentSelector) buildEnvironmentConfigRefFromSelector(cl *v1alp return envConfigs, nil } -func sortConfigs(ec []v1alpha1.EnvironmentConfig, f string) error { //nolint:gocyclo // TODO(phisco): refactor +func sortConfigs(ec []v1alpha1.EnvironmentConfig, f string) error { p := make([]struct { ec v1alpha1.EnvironmentConfig val any diff --git a/internal/controller/apiextensions/composite/ready.go b/internal/controller/apiextensions/composite/ready.go index 50b3fcdd8..13284179a 100644 --- a/internal/controller/apiextensions/composite/ready.go +++ b/internal/controller/apiextensions/composite/ready.go @@ -168,8 +168,6 @@ func (c ReadinessCheck) Validate() error { } // IsReady runs the readiness check against the supplied object. -// -//nolint:gocyclo // just a switch func (c ReadinessCheck) IsReady(p *fieldpath.Paved, o ConditionedObject) (bool, error) { if err := c.Validate(); err != nil { return false, errors.Wrap(err, errInvalidCheck) diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index 05dad5837..e85dfe4ad 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -467,7 +467,7 @@ type Reconciler struct { } // Reconcile a composite resource. 
-func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcile methods are often very complex. Be wary. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcile methods are often very complex. Be wary. log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/apiextensions/composition/reconciler.go b/internal/controller/apiextensions/composition/reconciler.go index 9999eb27b..f090dae5c 100644 --- a/internal/controller/apiextensions/composition/reconciler.go +++ b/internal/controller/apiextensions/composition/reconciler.go @@ -122,7 +122,7 @@ type Reconciler struct { } // Reconcile a Composition. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Only slightly over (12). +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/apiextensions/definition/composed.go b/internal/controller/apiextensions/definition/composed.go index 1fb643eee..0f3f108ea 100644 --- a/internal/controller/apiextensions/definition/composed.go +++ b/internal/controller/apiextensions/definition/composed.go @@ -215,7 +215,7 @@ func (i *composedResourceInformers) WatchComposedResources(gvks ...schema.GroupV // // Note that this complements WatchComposedResources which starts informers for // the composed resources referenced by a composite resource. -func (i *composedResourceInformers) cleanupComposedResourceInformers(ctx context.Context) { //nolint:gocyclo // splitting it doesn't make it easier to read. +func (i *composedResourceInformers) cleanupComposedResourceInformers(ctx context.Context) { crds := extv1.CustomResourceDefinitionList{} if err := i.cluster.GetClient().List(ctx, &crds); err != nil { i.log.Debug(errListCRDs, "error", err) diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 503f56925..5f469c4fd 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -281,7 +281,7 @@ type Reconciler struct { // Reconcile a CompositeResourceDefinition by defining a new kind of composite // resource and starting a controller to reconcile it. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are complex. Be wary of adding more. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcilers are complex. Be wary of adding more. log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/apiextensions/offered/reconciler.go b/internal/controller/apiextensions/offered/reconciler.go index 7804e44a5..8f1eaf990 100644 --- a/internal/controller/apiextensions/offered/reconciler.go +++ b/internal/controller/apiextensions/offered/reconciler.go @@ -241,7 +241,7 @@ type Reconciler struct { // Reconcile a CompositeResourceDefinition by defining a new kind of composite // resource claim and starting a controller to reconcile it. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are complex. 
Be wary of adding more. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcilers are complex. Be wary of adding more. log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/apiextensions/usage/reconciler.go b/internal/controller/apiextensions/usage/reconciler.go index 3de23a37b..836988763 100644 --- a/internal/controller/apiextensions/usage/reconciler.go +++ b/internal/controller/apiextensions/usage/reconciler.go @@ -207,7 +207,7 @@ type Reconciler struct { // Reconcile a Usage resource by resolving its selectors, defining ownership // relationship, adding a finalizer and handling proper deletion. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are typically complex. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcilers are typically complex. log := r.log.WithValues("request", req) ctx, cancel := context.WithTimeout(ctx, reconcileTimeout) defer cancel() diff --git a/internal/controller/pkg/manager/reconciler.go b/internal/controller/pkg/manager/reconciler.go index 6b11fd263..d65f8866e 100644 --- a/internal/controller/pkg/manager/reconciler.go +++ b/internal/controller/pkg/manager/reconciler.go @@ -268,7 +268,7 @@ func NewReconciler(mgr ctrl.Manager, opts ...ReconcilerOption) *Reconciler { } // Reconcile package. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are complex. Be wary of adding more. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcilers are complex. Be wary of adding more. log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/pkg/resolver/reconciler.go b/internal/controller/pkg/resolver/reconciler.go index ea94c666b..50fb9e92e 100644 --- a/internal/controller/pkg/resolver/reconciler.go +++ b/internal/controller/pkg/resolver/reconciler.go @@ -163,7 +163,7 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { } // Reconcile package revision. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are complex. Be wary of adding more. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/pkg/revision/dependency.go b/internal/controller/pkg/revision/dependency.go index 91e439d56..c70772a16 100644 --- a/internal/controller/pkg/revision/dependency.go +++ b/internal/controller/pkg/revision/dependency.go @@ -70,7 +70,7 @@ func NewPackageDependencyManager(c client.Client, nd dag.NewDAGFn, t v1beta1.Pac } // Resolve resolves package dependencies. -func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Object, pr v1.PackageRevision) (found, installed, invalid int, err error) { //nolint:gocyclo // TODO(negz): Can this be refactored for less complexity? +func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Object, pr v1.PackageRevision) (found, installed, invalid int, err error) { //nolint:gocognit // TODO(negz): Can this be refactored for less complexity? 
// If we are inactive, we don't need to resolve dependencies. if pr.GetDesiredState() == v1.PackageRevisionInactive { return 0, 0, 0, nil diff --git a/internal/controller/pkg/revision/establisher.go b/internal/controller/pkg/revision/establisher.go index 9760d05fd..47c1c2718 100644 --- a/internal/controller/pkg/revision/establisher.go +++ b/internal/controller/pkg/revision/establisher.go @@ -128,7 +128,7 @@ func (e *APIEstablisher) Establish(ctx context.Context, objs []runtime.Object, p // ReleaseObjects removes control of owned resources in the API server for a // package revision. -func (e *APIEstablisher) ReleaseObjects(ctx context.Context, parent v1.PackageRevision) error { //nolint:gocyclo // complexity coming from parallelism. +func (e *APIEstablisher) ReleaseObjects(ctx context.Context, parent v1.PackageRevision) error { //nolint:gocognit // complexity coming from parallelism. // Note(turkenh): We rely on status.objectRefs to get the list of objects // that are controlled by the package revision. Relying on the status is // not ideal as it might get lost (e.g. if the status subresource is @@ -222,7 +222,7 @@ func (e *APIEstablisher) addLabels(objs []runtime.Object, parent v1.PackageRevis return nil } -func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, parent v1.PackageRevision, control bool) (allObjs []currentDesired, err error) { //nolint:gocyclo // TODO(negz): Refactor this to break up complexity. +func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, parent v1.PackageRevision, control bool) (allObjs []currentDesired, err error) { //nolint:gocognit // TODO(negz): Refactor this to break up complexity. var webhookTLSCert []byte if parentWithRuntime, ok := parent.(v1.PackageRevisionWithRuntime); ok && control { webhookTLSCert, err = e.getWebhookTLSCert(ctx, parentWithRuntime) @@ -305,7 +305,7 @@ func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, pa return allObjs, nil } -func (e *APIEstablisher) enrichControlledResource(res runtime.Object, webhookTLSCert []byte, parent v1.PackageRevision) error { //nolint:gocyclo // just a switch +func (e *APIEstablisher) enrichControlledResource(res runtime.Object, webhookTLSCert []byte, parent v1.PackageRevision) error { //nolint:gocognit // just a switch // The generated webhook configurations have a static hard-coded name // that the developers of the providers can't affect. Here, we make sure // to distinguish one from the other by setting the name to the parent @@ -389,7 +389,7 @@ func (e *APIEstablisher) getWebhookTLSCert(ctx context.Context, parentWithRuntim return webhookTLSCert, nil } -func (e *APIEstablisher) establish(ctx context.Context, allObjs []currentDesired, parent client.Object, control bool) ([]xpv1.TypedReference, error) { //nolint:gocyclo // Only slightly over (12). +func (e *APIEstablisher) establish(ctx context.Context, allObjs []currentDesired, parent client.Object, control bool) ([]xpv1.TypedReference, error) { g, ctx := errgroup.WithContext(ctx) g.SetLimit(maxConcurrentEstablishers) out := make(chan xpv1.TypedReference, len(allObjs)) diff --git a/internal/controller/pkg/revision/imageback.go b/internal/controller/pkg/revision/imageback.go index 3f886c4c8..81500cf81 100644 --- a/internal/controller/pkg/revision/imageback.go +++ b/internal/controller/pkg/revision/imageback.go @@ -80,7 +80,7 @@ func NewImageBackend(fetcher xpkg.Fetcher, opts ...ImageBackendOption) *ImageBac } // Init initializes an ImageBackend. 
-func (i *ImageBackend) Init(ctx context.Context, bo ...parser.BackendOption) (io.ReadCloser, error) { //nolint:gocyclo // TODO(negz): Can this be made less complex? +func (i *ImageBackend) Init(ctx context.Context, bo ...parser.BackendOption) (io.ReadCloser, error) { // NOTE(hasheddan): we use nestedBackend here because simultaneous // reconciles of providers or configurations can lead to the package // revision being overwritten mid-execution in the shared image backend when diff --git a/internal/controller/pkg/revision/reconciler.go b/internal/controller/pkg/revision/reconciler.go index cc08fb3e9..c08f96cb9 100644 --- a/internal/controller/pkg/revision/reconciler.go +++ b/internal/controller/pkg/revision/reconciler.go @@ -464,7 +464,7 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { } // Reconcile package revision. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are often very complex. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcilers are often very complex. log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/pkg/revision/runtime_override_options.go b/internal/controller/pkg/revision/runtime_override_options.go index f9aba51ba..3c3212a1c 100644 --- a/internal/controller/pkg/revision/runtime_override_options.go +++ b/internal/controller/pkg/revision/runtime_override_options.go @@ -255,7 +255,7 @@ func DeploymentWithRuntimeContainer() DeploymentOverride { // DeploymentForControllerConfig overrides the deployment with the values // defined in the ControllerConfig. -func DeploymentForControllerConfig(cc *v1alpha1.ControllerConfig) DeploymentOverride { //nolint:gocyclo // Simple if statements for setting values if they are not nil/empty. +func DeploymentForControllerConfig(cc *v1alpha1.ControllerConfig) DeploymentOverride { //nolint:gocognit // Simple if statements for setting values if they are not nil/empty. 
return func(d *appsv1.Deployment) { d.Labels = cc.Labels d.Annotations = cc.Annotations diff --git a/internal/controller/pkg/revision/watch.go b/internal/controller/pkg/revision/watch.go index 936e87272..3b9c838e7 100644 --- a/internal/controller/pkg/revision/watch.go +++ b/internal/controller/pkg/revision/watch.go @@ -67,7 +67,7 @@ func (e *EnqueueRequestForReferencingProviderRevisions) Generic(ctx context.Cont e.add(ctx, evt.Object, q) } -func (e *EnqueueRequestForReferencingProviderRevisions) add(ctx context.Context, obj runtime.Object, queue adder) { //nolint:gocyclo // it will be simplified soon when we clean up the controller config +func (e *EnqueueRequestForReferencingProviderRevisions) add(ctx context.Context, obj runtime.Object, queue adder) { cc, isCC := obj.(*v1alpha1.ControllerConfig) rc, isRC := obj.(*v1beta1.DeploymentRuntimeConfig) @@ -129,7 +129,7 @@ func (e *EnqueueRequestForReferencingFunctionRevisions) Generic(ctx context.Cont e.add(ctx, evt.Object, q) } -func (e *EnqueueRequestForReferencingFunctionRevisions) add(ctx context.Context, obj runtime.Object, queue adder) { //nolint:gocyclo // it will be simplified soon when we clean up the controller config +func (e *EnqueueRequestForReferencingFunctionRevisions) add(ctx context.Context, obj runtime.Object, queue adder) { cc, isCC := obj.(*v1alpha1.ControllerConfig) rc, isRC := obj.(*v1beta1.DeploymentRuntimeConfig) diff --git a/internal/controller/rbac/provider/binding/reconciler.go b/internal/controller/rbac/provider/binding/reconciler.go index 290733b9c..30c9cea79 100644 --- a/internal/controller/rbac/provider/binding/reconciler.go +++ b/internal/controller/rbac/provider/binding/reconciler.go @@ -134,7 +134,7 @@ type Reconciler struct { // Reconcile a ProviderRevision by creating a ClusterRoleBinding that binds a // provider's service account to its system ClusterRole. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcile methods are often very complex. Be wary. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/rbac/provider/roles/reconciler.go b/internal/controller/rbac/provider/roles/reconciler.go index 4d2ce798e..97e34020a 100644 --- a/internal/controller/rbac/provider/roles/reconciler.go +++ b/internal/controller/rbac/provider/roles/reconciler.go @@ -229,7 +229,7 @@ type Reconciler struct { // Reconcile a ProviderRevision by creating a series of opinionated ClusterRoles // that may be bound to allow access to the resources it defines. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Slightly over (13). +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Slightly over (13). log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/rbac/provider/roles/requests.go b/internal/controller/rbac/provider/roles/requests.go index 939a901d9..1ec1c0126 100644 --- a/internal/controller/rbac/provider/roles/requests.go +++ b/internal/controller/rbac/provider/roles/requests.go @@ -128,7 +128,7 @@ func (r Rule) path() path { } // Expand RBAC policy rules into our granular rules. -func Expand(ctx context.Context, rs ...rbacv1.PolicyRule) ([]Rule, error) { //nolint:gocyclo // Granular rules are inherently complex. 
+func Expand(ctx context.Context, rs ...rbacv1.PolicyRule) ([]Rule, error) { //nolint:gocognit // Granular rules are inherently complex. out := make([]Rule, 0, len(rs)) for _, r := range rs { for _, u := range r.NonResourceURLs { diff --git a/internal/initializer/crds.go b/internal/initializer/crds.go index 4a7683c61..1754b4f5c 100644 --- a/internal/initializer/crds.go +++ b/internal/initializer/crds.go @@ -80,7 +80,7 @@ type CoreCRDs struct { } // Run applies all CRDs in the given directory. -func (c *CoreCRDs) Run(ctx context.Context, kube client.Client) error { //nolint:gocyclo // TODO(negz): Can anything be broken out (maybe the loop body)? +func (c *CoreCRDs) Run(ctx context.Context, kube client.Client) error { var caBundle []byte if c.WebhookTLSSecretRef != nil { s := &corev1.Secret{} diff --git a/internal/initializer/crds_migrator.go b/internal/initializer/crds_migrator.go index 858f52733..4692b39c9 100644 --- a/internal/initializer/crds_migrator.go +++ b/internal/initializer/crds_migrator.go @@ -46,7 +46,7 @@ type CoreCRDsMigrator struct { } // Run applies all CRDs in the given directory. -func (c *CoreCRDsMigrator) Run(ctx context.Context, kube client.Client) error { //nolint:gocyclo // TODO(phisco) refactor +func (c *CoreCRDsMigrator) Run(ctx context.Context, kube client.Client) error { var crd extv1.CustomResourceDefinition if err := kube.Get(ctx, client.ObjectKey{Name: c.crdName}, &crd); err != nil { if kerrors.IsNotFound(err) { diff --git a/internal/initializer/installer.go b/internal/initializer/installer.go index 18810e992..542c9dca6 100644 --- a/internal/initializer/installer.go +++ b/internal/initializer/installer.go @@ -52,7 +52,7 @@ type PackageInstaller struct { } // Run makes sure all specified packages exist. -func (pi *PackageInstaller) Run(ctx context.Context, kube client.Client) error { //nolint:gocyclo // TODO(negz): Could any of this be broken out? +func (pi *PackageInstaller) Run(ctx context.Context, kube client.Client) error { pkgs := make([]client.Object, len(pi.providers)+len(pi.configurations)) // NOTE(hasheddan): we build maps of existing Provider and Configuration // sources to the package names such that we can update the version when a diff --git a/internal/initializer/webhook_configurations.go b/internal/initializer/webhook_configurations.go index e6d8e579f..1e5b9bd4f 100644 --- a/internal/initializer/webhook_configurations.go +++ b/internal/initializer/webhook_configurations.go @@ -75,7 +75,7 @@ type WebhookConfigurations struct { // Run applies all webhook ValidatingWebhookConfigurations and // MutatingWebhookConfiguration in the given directory. -func (c *WebhookConfigurations) Run(ctx context.Context, kube client.Client) error { //nolint:gocyclo // Only slightly over (11). +func (c *WebhookConfigurations) Run(ctx context.Context, kube client.Client) error { s := &corev1.Secret{} if err := kube.Get(ctx, c.TLSSecretRef, s); err != nil { return errors.Wrap(err, errGetWebhookSecret) diff --git a/internal/validation/apiextensions/v1/composition/handler.go b/internal/validation/apiextensions/v1/composition/handler.go index c9750fee8..7acc4bb4c 100644 --- a/internal/validation/apiextensions/v1/composition/handler.go +++ b/internal/validation/apiextensions/v1/composition/handler.go @@ -79,7 +79,7 @@ type validator struct { } // ValidateCreate validates a Composition. 
-func (v *validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { //nolint:gocyclo // Currently only at 11 +func (v *validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { comp, ok := obj.(*v1.Composition) if !ok { return nil, errors.New(errNotComposition) diff --git a/internal/xpkg/build.go b/internal/xpkg/build.go index d68a8f2dc..32ac7e962 100644 --- a/internal/xpkg/build.go +++ b/internal/xpkg/build.go @@ -124,7 +124,7 @@ func WithBase(img v1.Image) BuildOpt { } // Build compiles a Crossplane package from an on-disk package. -func (b *Builder) Build(ctx context.Context, opts ...BuildOpt) (v1.Image, runtime.Object, error) { //nolint:gocyclo // TODO(lsviben) consider refactoring +func (b *Builder) Build(ctx context.Context, opts ...BuildOpt) (v1.Image, runtime.Object, error) { bOpts := &buildOpts{ base: empty.Image, } diff --git a/internal/xpkg/name.go b/internal/xpkg/name.go index 6d69b13b5..1fd73fedd 100644 --- a/internal/xpkg/name.go +++ b/internal/xpkg/name.go @@ -85,7 +85,7 @@ func FriendlyID(name, hash string) string { } // ToDNSLabel converts the string to a valid DNS label. -func ToDNSLabel(s string) string { //nolint:gocyclo // TODO(negz): Document the conditions in this function. +func ToDNSLabel(s string) string { var cut strings.Builder for i := range s { b := s[i] diff --git a/internal/xpkg/upbound/context.go b/internal/xpkg/upbound/context.go index c71a166ea..0d6f545dc 100644 --- a/internal/xpkg/upbound/context.go +++ b/internal/xpkg/upbound/context.go @@ -98,7 +98,7 @@ func AllowMissingProfile() Option { } // NewFromFlags constructs a new context from flags. -func NewFromFlags(f Flags, opts ...Option) (*Context, error) { //nolint:gocyclo // TODO(phisco): imported as is, refactor +func NewFromFlags(f Flags, opts ...Option) (*Context, error) { p, err := config.GetDefaultPath() if err != nil { return nil, err diff --git a/pkg/validation/apiextensions/v1/composition/patches.go b/pkg/validation/apiextensions/v1/composition/patches.go index 4b6f58a6a..0fe2d8bfb 100644 --- a/pkg/validation/apiextensions/v1/composition/patches.go +++ b/pkg/validation/apiextensions/v1/composition/patches.go @@ -168,7 +168,7 @@ type patchValidationCtx struct { resourceGVK schema.GroupVersionKind } -func (v *Validator) validatePatchWithSchemaInternal(ctx patchValidationCtx) *field.Error { //nolint:gocyclo // mainly due to the switch, not much to refactor +func (v *Validator) validatePatchWithSchemaInternal(ctx patchValidationCtx) *field.Error { var validationErr *field.Error var fromType, toType xpschema.KnownJSONType switch ctx.patch.GetType() { @@ -398,7 +398,7 @@ func validateFieldPathSegment(parent *apiextensions.JSONSchemaProps, segment fie return nil, nil } -func validateFieldPathSegmentField(parent *apiextensions.JSONSchemaProps, segment fieldpath.Segment) (*apiextensions.JSONSchemaProps, error) { //nolint:gocyclo // inherently complex +func validateFieldPathSegmentField(parent *apiextensions.JSONSchemaProps, segment fieldpath.Segment) (*apiextensions.JSONSchemaProps, error) { if parent == nil { return nil, nil } @@ -460,8 +460,6 @@ func validateFieldPathSegmentIndex(parent *apiextensions.JSONSchemaProps, segmen } // IsValidInputForTransform validates the supplied Transform type, taking into consideration also the input type. -// -//nolint:gocyclo // This is a long but simple/same-y switch. 
func IsValidInputForTransform(t *v1.Transform, fromType v1.TransformIOType) error { switch t.Type { case v1.TransformTypeMath: diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go index d95ae4246..b5a2deff8 100644 --- a/test/e2e/funcs/feature.go +++ b/test/e2e/funcs/feature.go @@ -736,7 +736,7 @@ func CompositeResourceHasFieldValueWithin(d time.Duration, dir, claimFile, path // ComposedResourcesHaveFieldValueWithin fails a test if the composed // resources created by the claim does not have the supplied value at the // supplied path within the supplied duration. -func ComposedResourcesHaveFieldValueWithin(d time.Duration, dir, file, path string, want any, filter func(object k8s.Object) bool) features.Func { //nolint:gocyclo // Not too much over. +func ComposedResourcesHaveFieldValueWithin(d time.Duration, dir, file, path string, want any, filter func(object k8s.Object) bool) features.Func { //nolint:gocognit // Not too much over. return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { cm := &claim.Unstructured{} if err := decoder.DecodeFile(os.DirFS(dir), file, cm); err != nil { @@ -894,7 +894,7 @@ func ListedResourcesModifiedWith(list k8s.ObjectList, min int, modify func(objec // LogResources polls the given kind of resources and logs creations, deletions // and changed conditions. -func LogResources(list k8s.ObjectList, listOptions ...resources.ListOption) features.Func { //nolint:gocyclo // this is a test helper +func LogResources(list k8s.ObjectList, listOptions ...resources.ListOption) features.Func { //nolint:gocognit // this is a test helper return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { prev := map[string]map[xpv1.ConditionType]xpv1.Condition{} From 80e45b3ae24a46fb7994b7b51482e5c1cfabde0c Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 20:43:25 -0800 Subject: [PATCH 022/370] Enable gochecknoinits linter We try to avoid using init unless it's really necessary or idiomatic (e.g. registering Kubernetes API types with the scheme). Generally we don't want importing our packages to have magic side effects. Signed-off-by: Nic Cope --- .golangci.yml | 8 +++++++- internal/xpkg/fetch.go | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index ce097a9d9..6c3fa9781 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -144,7 +144,6 @@ linters: - containedctx - dupword - depguard # TODO(negz): Use this to avoid e.g. stretchr/testify. - - gochecknoinits # Below are linters that lint for things we don't value. Each entry below # this line must have a comment explaining the rationale. @@ -212,12 +211,19 @@ issues: - gosec - scopelint - unparam + - gochecknoinits # Ease some gocritic warnings on test files. - path: _test\.go text: "(unnamedResult|exitAfterDefer)" linters: - gocritic + + # It's idiomatic to register Kubernetes types with a package scoped + # SchemeBuilder using an init function. + - path: apis/ + linters: + - gochecknoinits # These are performance optimisations rather than style issues per se. # They warn when function arguments or range values copy a lot of memory diff --git a/internal/xpkg/fetch.go b/internal/xpkg/fetch.go index d991de98a..4963d382a 100644 --- a/internal/xpkg/fetch.go +++ b/internal/xpkg/fetch.go @@ -34,7 +34,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" ) -func init() { +func init() { //nolint:gochecknoinits // See comment below. 
// NOTE(hasheddan): we set the logrus package-level logger to discard output // due to the fact that the AWS ECR credential helper uses it to log errors // when parsing registry server URL, which happens any time a package is From 53f173d9f738409f9e1e5a49320c36fb4bc0b9d6 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 20:46:10 -0800 Subject: [PATCH 023/370] Move enabled linters above linter settings Signed-off-by: Nic Cope --- .golangci.yml | 191 +++++++++++++++++++++++++------------------------- 1 file changed, 95 insertions(+), 96 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 6c3fa9781..acfec331f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -8,102 +8,6 @@ output: # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" format: colored-line-number -linters-settings: - errcheck: - # report about not checking of errors in type assetions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: false - - # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; - # default is false: such cases aren't reported by default. - check-blank: false - - # [deprecated] comma-separated list of pairs of the form pkg:regex - # the regex is used to ignore names within pkg. (default "fmt:.*"). - # see https://github.com/kisielk/errcheck#the-deprecated-method for details - ignore: fmt:.*,io/ioutil:^Read.* - - govet: - # report about shadowed variables - check-shadowing: false - - gofmt: - # simplify code: gofmt with `-s` option, true by default - simplify: true - - gci: - custom-order: true - sections: - - standard - - default - - prefix(github.com/crossplane/crossplane-runtime) - - prefix(github.com/crossplane/crossplane) - - blank - - dot - - maligned: - # print struct with more effective memory layout or not, false by default - suggest-new: true - - dupl: - # tokens count to trigger issue, 150 by default - threshold: 100 - - goconst: - # minimal length of string constant, 3 by default - min-len: 3 - # minimal occurrences count to trigger, 3 by default - min-occurrences: 5 - - lll: - # tab width in spaces. Default to 1. - tab-width: 1 - - unused: - # treat code as a program (not a library) and report unused exported identifiers; default is false. - # XXX: if you enable this setting, unused will report a lot of false-positives in text editors: - # if it's called for subdir of a project it can't find funcs usages. All text editor integrations - # with golangci-lint call it on a directory with the changed file. - check-exported: false - - unparam: - # Inspect exported functions, default is false. Set to true if no external program/library imports your code. - # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: - # if it's called for subdir of a project it can't find external interfaces. All text editor integrations - # with golangci-lint call it on a directory with the changed file. - check-exported: false - - nakedret: - # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 - max-func-lines: 30 - - prealloc: - # XXX: we don't recommend using this linter before doing performance profiling. - # For most programs usage of prealloc will be a premature optimization. - - # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. - # True by default. 
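The apis/ exclusion added for gochecknoinits above calls out the one init we consider idiomatic: registering Kubernetes API types with a package scoped SchemeBuilder. The sketch below is a hypothetical, stripped-down version of that idiom; the Widget type and the example.crossplane.io group are invented, and a real API type would use generated deepcopy code rather than the hand-written method here.

```go
// Package v1alpha1 sketches the kind of deliberate import side effect the
// apis/ exclusion is meant to allow. Illustrative only, not part of this patch.
package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "sigs.k8s.io/controller-runtime/pkg/scheme"
)

// Widget is a minimal, hypothetical stand-in for a real API type.
type Widget struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`
}

// DeepCopyObject implements runtime.Object. Real types generate this.
func (w *Widget) DeepCopyObject() runtime.Object {
    out := *w
    w.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    return &out
}

var (
    // SchemeGroupVersion identifies this hypothetical API group.
    SchemeGroupVersion = schema.GroupVersion{Group: "example.crossplane.io", Version: "v1alpha1"}

    // SchemeBuilder collects the types to add to a runtime.Scheme.
    SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}

    // AddToScheme adds all types of this group to a scheme.
    AddToScheme = SchemeBuilder.AddToScheme
)

// Registering types at import time is the idiomatic exception: under apis/
// this init needs no nolint directive.
func init() {
    SchemeBuilder.Register(&Widget{})
}
```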
- simple: true - range-loops: true # Report preallocation suggestions on range loops, true by default - for-loops: false # Report preallocation suggestions on for loops, false by default - - gocritic: - # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint` run to see all tags and checks. - # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags". - enabled-tags: - - performance - - settings: # settings passed to gocritic - captLocal: # must be valid enabled check name - paramsOnly: true - rangeValCopy: - sizeThreshold: 32 - - nolintlint: - require-explanation: true - require-specific: true - - linters: enable-all: true fast: false @@ -200,6 +104,101 @@ linters: # every duplicated code block to be factored out into a function. - dupl +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + + # [deprecated] comma-separated list of pairs of the form pkg:regex + # the regex is used to ignore names within pkg. (default "fmt:.*"). + # see https://github.com/kisielk/errcheck#the-deprecated-method for details + ignore: fmt:.*,io/ioutil:^Read.* + + govet: + # report about shadowed variables + check-shadowing: false + + gofmt: + # simplify code: gofmt with `-s` option, true by default + simplify: true + + gci: + custom-order: true + sections: + - standard + - default + - prefix(github.com/crossplane/crossplane-runtime) + - prefix(github.com/crossplane/crossplane) + - blank + - dot + + maligned: + # print struct with more effective memory layout or not, false by default + suggest-new: true + + dupl: + # tokens count to trigger issue, 150 by default + threshold: 100 + + goconst: + # minimal length of string constant, 3 by default + min-len: 3 + # minimal occurrences count to trigger, 3 by default + min-occurrences: 5 + + lll: + # tab width in spaces. Default to 1. + tab-width: 1 + + unused: + # treat code as a program (not a library) and report unused exported identifiers; default is false. + # XXX: if you enable this setting, unused will report a lot of false-positives in text editors: + # if it's called for subdir of a project it can't find funcs usages. All text editor integrations + # with golangci-lint call it on a directory with the changed file. + check-exported: false + + unparam: + # Inspect exported functions, default is false. Set to true if no external program/library imports your code. + # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: + # if it's called for subdir of a project it can't find external interfaces. All text editor integrations + # with golangci-lint call it on a directory with the changed file. + check-exported: false + + nakedret: + # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 + max-func-lines: 30 + + prealloc: + # XXX: we don't recommend using this linter before doing performance profiling. + # For most programs usage of prealloc will be a premature optimization. + + # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. + # True by default. 
+ simple: true + range-loops: true # Report preallocation suggestions on range loops, true by default + for-loops: false # Report preallocation suggestions on for loops, false by default + + gocritic: + # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint` run to see all tags and checks. + # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags". + enabled-tags: + - performance + + settings: # settings passed to gocritic + captLocal: # must be valid enabled check name + paramsOnly: true + rangeValCopy: + sizeThreshold: 32 + + nolintlint: + require-explanation: true + require-specific: true + issues: # Excluding configuration per-path and per-linter exclude-rules: From 6f1bbde49bb7a486838dfa4ed730573d277cb9a1 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 20:53:44 -0800 Subject: [PATCH 024/370] Enable depguard For now we only prevent importing test assertion libraries. In future we could use it to avoid other things (e.g. MPL licensed libraries). Signed-off-by: Nic Cope --- .golangci.yml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index acfec331f..e9a135978 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -47,7 +47,6 @@ linters: - forcetypeassert - containedctx - dupword - - depguard # TODO(negz): Use this to avoid e.g. stretchr/testify. # Below are linters that lint for things we don't value. Each entry below # this line must have a comment explaining the rationale. @@ -198,6 +197,20 @@ linters-settings: nolintlint: require-explanation: true require-specific: true + + depguard: + rules: + no_third_party_test_libraries: + list-mode: lax + files: + - $test + deny: + - pkg: github.com/stretchr/testify + desc: "See https://go.dev/wiki/TestComments#assert-libraries" + - pkg: github.com/onsi/ginkgo + desc: "See https://go.dev/wiki/TestComments#assert-libraries" + - pkg: github.com/onsi/gomega + desc: "See https://go.dev/wiki/TestComments#assert-libraries" issues: # Excluding configuration per-path and per-linter From 3c1196dd77211634ce6affea3323e61775df9f29 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 21:27:15 -0800 Subject: [PATCH 025/370] Enable dupword linter This linter finds duplicate words, e.g. "the the". Signed-off-by: Nic Cope --- .golangci.yml | 1 - .../trace/internal/printer/default_test.go | 36 ++++++++++--------- .../beta/trace/internal/printer/dot_test.go | 2 +- .../trace/internal/printer/printer_test.go | 2 +- .../apiextensions/usage/reconciler.go | 2 +- .../v1/composition/validator_test.go | 2 +- test/e2e/config/environment.go | 2 +- 7 files changed, 25 insertions(+), 22 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e9a135978..268a5b70b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -46,7 +46,6 @@ linters: - gochecknoglobals - forcetypeassert - containedctx - - dupword # Below are linters that lint for things we don't value. Each entry below # this line must have a comment explaining the rationale. 
diff --git a/cmd/crank/beta/trace/internal/printer/default_test.go b/cmd/crank/beta/trace/internal/printer/default_test.go index 1239741c6..1dc93b1b2 100644 --- a/cmd/crank/beta/trace/internal/printer/default_test.go +++ b/cmd/crank/beta/trace/internal/printer/default_test.go @@ -53,6 +53,7 @@ func TestDefaultPrinter(t *testing.T) { }, want: want{ // Note: Use spaces instead of tabs for indentation + //nolint:dupword // False positive for 'True True' output: ` NAME SYNCED READY STATUS ObjectStorage/test-resource (default) True True @@ -75,6 +76,7 @@ ObjectStorage/test-resource (default) True True }, want: want{ // Note: Use spaces instead of tabs for indentation + //nolint:dupword // False positive for 'True True' output: ` NAME RESOURCE SYNCED READY STATUS ObjectStorage/test-resource (default) True True @@ -96,15 +98,16 @@ ObjectStorage/test-resource (default) True True }, want: want{ // Note: Use spaces instead of tabs for indentation + //nolint:dupword // False positive for 'True True' output: ` -NAME VERSION INSTALLED HEALTHY STATE STATUS -Configuration/platform-ref-aws v0.9.0 True True - HealthyPackageRevision -├─ ConfigurationRevision/platform-ref-aws-9ad7b5db2899 v0.9.0 True True Active HealthyPackageRevision -└─ Configuration/upbound-configuration-aws-network upbound-configuration-aws-network v0.7.0 True True - HealthyPackageRevision - ├─ ConfigurationRevision/upbound-configuration-aws-network-97be9100cfe1 v0.7.0 True True Active HealthyPackageRevision - └─ Provider/upbound-provider-aws-ec2 v0.47.0 True Unknown - UnknownPackageRevisionHealth: ...der-helm xpkg.upbound.io/crossplane-contrib/provider-kubernetes] - ├─ ProviderRevision/upbound-provider-aws-ec2-9ad7b5db2899 v0.47.0 True False Active UnhealthyPackageRevision: ...ider package deployment has no condition of type "Available" yet - └─ Provider/upbound-provider-aws-something v0.47.0 True - - ActivePackageRevision +NAME VERSION INSTALLED HEALTHY STATE STATUS +Configuration/platform-ref-aws v0.9.0 True True - HealthyPackageRevision +├─ ConfigurationRevision/platform-ref-aws-9ad7b5db2899 v0.9.0 True True Active HealthyPackageRevision +└─ Configuration/upbound-configuration-aws-network v0.7.0 True True - HealthyPackageRevision + ├─ ConfigurationRevision/upbound-configuration-aws-network-97be9100cfe1 v0.7.0 True True Active HealthyPackageRevision + └─ Provider/upbound-provider-aws-ec2 v0.47.0 True Unknown - UnknownPackageRevisionHealth: ...der-helm xpkg.upbound.io/crossplane-contrib/provider-kubernetes] + ├─ ProviderRevision/upbound-provider-aws-ec2-9ad7b5db2899 v0.47.0 True False Active UnhealthyPackageRevision: ...ider package deployment has no condition of type "Available" yet + └─ Provider/upbound-provider-aws-something v0.47.0 True - - ActivePackageRevision `, err: nil, }, @@ -117,15 +120,16 @@ Configuration/platform-ref-aws }, want: want{ // Note: Use spaces instead of tabs for indentation + //nolint:dupword // False positive for 'True True' output: ` -NAME PACKAGE VERSION INSTALLED HEALTHY STATE STATUS -Configuration/platform-ref-aws xpkg.upbound.io/upbound/platform-ref-aws v0.9.0 True True - HealthyPackageRevision -├─ ConfigurationRevision/platform-ref-aws-9ad7b5db2899 xpkg.upbound.io/upbound/platform-ref-aws v0.9.0 True True Active HealthyPackageRevision -└─ Configuration/upbound-configuration-aws-network upbound-configuration-aws-network xpkg.upbound.io/upbound/configuration-aws-network v0.7.0 True True - HealthyPackageRevision - ├─ ConfigurationRevision/upbound-configuration-aws-network-97be9100cfe1 
xpkg.upbound.io/upbound/configuration-aws-network v0.7.0 True True Active HealthyPackageRevision - └─ Provider/upbound-provider-aws-ec2 xpkg.upbound.io/upbound/provider-aws-ec2 v0.47.0 True Unknown - UnknownPackageRevisionHealth: cannot resolve package dependencies: incompatible dependencies: [xpkg.upbound.io/crossplane-contrib/provider-helm xpkg.upbound.io/crossplane-contrib/provider-kubernetes] - ├─ ProviderRevision/upbound-provider-aws-ec2-9ad7b5db2899 xpkg.upbound.io/upbound/provider-aws-ec2 v0.47.0 True False Active UnhealthyPackageRevision: post establish runtime hook failed for package: provider package deployment has no condition of type "Available" yet - └─ Provider/upbound-provider-aws-something xpkg.upbound.io/upbound/provider-aws-something v0.47.0 True - - ActivePackageRevision +NAME PACKAGE VERSION INSTALLED HEALTHY STATE STATUS +Configuration/platform-ref-aws xpkg.upbound.io/upbound/platform-ref-aws v0.9.0 True True - HealthyPackageRevision +├─ ConfigurationRevision/platform-ref-aws-9ad7b5db2899 xpkg.upbound.io/upbound/platform-ref-aws v0.9.0 True True Active HealthyPackageRevision +└─ Configuration/upbound-configuration-aws-network xpkg.upbound.io/upbound/configuration-aws-network v0.7.0 True True - HealthyPackageRevision + ├─ ConfigurationRevision/upbound-configuration-aws-network-97be9100cfe1 xpkg.upbound.io/upbound/configuration-aws-network v0.7.0 True True Active HealthyPackageRevision + └─ Provider/upbound-provider-aws-ec2 xpkg.upbound.io/upbound/provider-aws-ec2 v0.47.0 True Unknown - UnknownPackageRevisionHealth: cannot resolve package dependencies: incompatible dependencies: [xpkg.upbound.io/crossplane-contrib/provider-helm xpkg.upbound.io/crossplane-contrib/provider-kubernetes] + ├─ ProviderRevision/upbound-provider-aws-ec2-9ad7b5db2899 xpkg.upbound.io/upbound/provider-aws-ec2 v0.47.0 True False Active UnhealthyPackageRevision: post establish runtime hook failed for package: provider package deployment has no condition of type "Available" yet + └─ Provider/upbound-provider-aws-something xpkg.upbound.io/upbound/provider-aws-something v0.47.0 True - - ActivePackageRevision `, err: nil, }, diff --git a/cmd/crank/beta/trace/internal/printer/dot_test.go b/cmd/crank/beta/trace/internal/printer/dot_test.go index e1cfab84c..1fe7fb127 100644 --- a/cmd/crank/beta/trace/internal/printer/dot_test.go +++ b/cmd/crank/beta/trace/internal/printer/dot_test.go @@ -67,7 +67,7 @@ func TestPrintDotGraph(t *testing.T) { n1[label="Name: platform-ref-aws\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/platform-ref-aws:v0.9.0\nInstalled: True\nHealthy: True\n",penwidth="2"]; n2[label="Name: platform-ref-aws-9ad7b5db2899\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/platform-ref-aws:v0.9.0\nHealthy: True\nState: HealthyPackageRevision\n",penwidth="2"]; - n3[label="Name: upbound-configuration-aws-network upbound-configuration-aws-network\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/configuration-aws-network:v0.7.0\nInstalled: True\nHealthy: True\n",penwidth="2"]; + n3[label="Name: upbound-configuration-aws-network\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/configuration-aws-network:v0.7.0\nInstalled: True\nHealthy: True\n",penwidth="2"]; n4[label="Name: upbound-configuration-aws-network-97be9100cfe1\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/configuration-aws-network:v0.7.0\nHealthy: True\nState: HealthyPackageRevision\n",penwidth="2"]; n5[label="Name: 
upbound-provider-aws-ec2\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/provider-aws-ec2:v0.47.0\nInstalled: True\nHealthy: Unknown\n",penwidth="2"]; n6[label="Name: upbound-provider-aws-ec2-9ad7b5db2899\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/provider-aws-ec2:v0.47.0\nHealthy: False\nState: UnhealthyPackageRevision\n",penwidth="2"]; diff --git a/cmd/crank/beta/trace/internal/printer/printer_test.go b/cmd/crank/beta/trace/internal/printer/printer_test.go index 2e93a8384..e95b0f747 100644 --- a/cmd/crank/beta/trace/internal/printer/printer_test.go +++ b/cmd/crank/beta/trace/internal/printer/printer_test.go @@ -219,7 +219,7 @@ func GetComplexPackage() *resource.Resource { WithDesiredState(v1.PackageRevisionActive)), }, { - Unstructured: DummyPackage(v1.ConfigurationGroupVersionKind, "upbound-configuration-aws-network upbound-configuration-aws-network", + Unstructured: DummyPackage(v1.ConfigurationGroupVersionKind, "upbound-configuration-aws-network", WithConditions(v1.Active(), v1.Healthy()), WithPackage("xpkg.upbound.io/upbound/configuration-aws-network:v0.7.0")), Children: []*resource.Resource{ diff --git a/internal/controller/apiextensions/usage/reconciler.go b/internal/controller/apiextensions/usage/reconciler.go index 836988763..580d004a9 100644 --- a/internal/controller/apiextensions/usage/reconciler.go +++ b/internal/controller/apiextensions/usage/reconciler.go @@ -66,7 +66,7 @@ const ( errGetUsed = "cannot get used" errAddOwnerToUsage = "cannot update usage resource with owner ref" errAddDetailsAnnotation = "cannot update usage resource with details annotation" - errAddInUseLabel = "cannot add in use use label to the used resource" + errAddInUseLabel = "cannot add in use label to the used resource" errRemoveInUseLabel = "cannot remove in use label from the used resource" errAddFinalizer = "cannot add finalizer" errRemoveFinalizer = "cannot remove finalizer" diff --git a/pkg/validation/apiextensions/v1/composition/validator_test.go b/pkg/validation/apiextensions/v1/composition/validator_test.go index 64a622e0b..c8723b3a2 100644 --- a/pkg/validation/apiextensions/v1/composition/validator_test.go +++ b/pkg/validation/apiextensions/v1/composition/validator_test.go @@ -91,7 +91,7 @@ func TestValidatorValidate(t *testing.T) { }, }, "RejectStrictInvalidFromFieldPath": { - reason: "Should reject a Composition with a patch using a field not allowed by the the Composite resource, if all CRDs are found", + reason: "Should reject a Composition with a patch using a field not allowed by the Composite resource, if all CRDs are found", want: want{ errs: field.ErrorList{ { diff --git a/test/e2e/config/environment.go b/test/e2e/config/environment.go index 30be698be..bc20a0d6b 100644 --- a/test/e2e/config/environment.go +++ b/test/e2e/config/environment.go @@ -103,7 +103,7 @@ func NewEnvironmentFromFlags() Environment { suites: map[string]testSuite{}, } c.kindClusterName = flag.String("kind-cluster-name", "", "name of the kind cluster to use") - c.kindLogsLocation = flag.String("kind-logs-location", "", "destination of the the kind cluster logs on failure") + c.kindLogsLocation = flag.String("kind-logs-location", "", "destination of the kind cluster logs on failure") c.createKindCluster = flag.Bool("create-kind-cluster", true, "create a kind cluster (and deploy Crossplane) before running tests, if the cluster does not already exist with the same name") c.destroyKindCluster = flag.Bool("destroy-kind-cluster", true, "destroy the kind cluster when tests 
complete") c.preinstallCrossplane = flag.Bool("preinstall-crossplane", true, "install Crossplane before running tests") From 682359972e13f9c004a7f9fd31cf838b35172786 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 21:29:38 -0800 Subject: [PATCH 026/370] Enable containedctx linter This linter makes sure you don't embed a context in a struct. We allow this in our table-driven tests, where we use structs to capture the arguments to be passed to functions. Signed-off-by: Nic Cope --- .golangci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index 268a5b70b..b896a9ffc 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -45,7 +45,6 @@ linters: - interfacebloat - gochecknoglobals - forcetypeassert - - containedctx # Below are linters that lint for things we don't value. Each entry below # this line must have a comment explaining the rationale. @@ -223,6 +222,7 @@ issues: - scopelint - unparam - gochecknoinits + - containedctx # Ease some gocritic warnings on test files. - path: _test\.go From 0c4be0b2dea02b59bc32e328fb4839b7f5043793 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 22:35:50 -0800 Subject: [PATCH 027/370] Enable the forcetypeassert linter Lots of //nolint comments for this one, but it seems worth at least prompting folks to think about. Signed-off-by: Nic Cope --- .golangci.yml | 2 +- .../apiextensions/claim/connection.go | 2 + .../controller/apiextensions/composite/api.go | 2 + .../apiextensions/composite/composition_pt.go | 2 +- .../composite/composition_transforms.go | 111 ++++++++++++++---- .../composite/composition_transforms_test.go | 2 +- .../composite/environment_selector.go | 6 +- .../apiextensions/definition/composed.go | 4 +- .../apiextensions/usage/reconciler.go | 1 + internal/controller/pkg/manager/reconciler.go | 13 +- .../controller/pkg/revision/establisher.go | 13 +- .../controller/pkg/revision/reconciler.go | 19 ++- .../pkg/revision/runtime_function.go | 5 +- .../controller/rbac/definition/reconciler.go | 6 +- .../controller/rbac/namespace/reconciler.go | 6 +- .../rbac/provider/binding/reconciler.go | 6 +- .../rbac/provider/roles/reconciler.go | 6 +- .../initializer/webhook_configurations.go | 2 +- internal/usage/handler.go | 2 +- .../apiextensions/v1/composition/handler.go | 2 +- internal/xpkg/fetch.go | 6 +- .../apiextensions/v1/composition/patches.go | 2 +- 22 files changed, 150 insertions(+), 70 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index b896a9ffc..a3aa7cf8c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -44,7 +44,6 @@ linters: - gomnd - interfacebloat - gochecknoglobals - - forcetypeassert # Below are linters that lint for things we don't value. Each entry below # this line must have a comment explaining the rationale. @@ -223,6 +222,7 @@ issues: - unparam - gochecknoinits - containedctx + - forcetypeassert # Ease some gocritic warnings on test files. - path: _test\.go diff --git a/internal/controller/apiextensions/claim/connection.go b/internal/controller/apiextensions/claim/connection.go index c40bf8ece..2e48506bf 100644 --- a/internal/controller/apiextensions/claim/connection.go +++ b/internal/controller/apiextensions/claim/connection.go @@ -146,6 +146,8 @@ func (a *APIConnectionPropagator) PropagateConnection(ctx context.Context, to re resource.AllowUpdateIf(func(current, desired runtime.Object) bool { // We consider the update to be a no-op and don't allow it if the // current and existing secret data are identical. 
+ + //nolint:forcetypeassert // These will always be secrets. return !cmp.Equal(current.(*corev1.Secret).Data, desired.(*corev1.Secret).Data, cmpopts.EquateEmpty()) }), ) diff --git a/internal/controller/apiextensions/composite/api.go b/internal/controller/apiextensions/composite/api.go index 248f83b30..ede13bd99 100644 --- a/internal/controller/apiextensions/composite/api.go +++ b/internal/controller/apiextensions/composite/api.go @@ -99,6 +99,8 @@ func (a *APIFilteredSecretPublisher) PublishConnection(ctx context.Context, o re resource.AllowUpdateIf(func(current, desired runtime.Object) bool { // We consider the update to be a no-op and don't allow it if the // current and existing secret data are identical. + + //nolint:forcetypeassert // These will always be secrets. return !cmp.Equal(current.(*corev1.Secret).Data, desired.(*corev1.Secret).Data, cmpopts.EquateEmpty()) }), ) diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index 861bcf874..4561534f3 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -344,7 +344,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re // be rejected by the API server. This will trigger an immediate requeue, // and we'll proceed to update the status as soon as there are no changes to // be made to the spec. - objCopy := xr.DeepCopyObject().(client.Object) + objCopy := xr.DeepCopy() if err := c.client.Apply(ctx, objCopy, mergeOptions(toXRPatchesFromTAs(tas))...); err != nil { return CompositionResult{}, errors.Wrap(err, errUpdate) } diff --git a/internal/controller/apiextensions/composite/composition_transforms.go b/internal/controller/apiextensions/composite/composition_transforms.go index 0547a1e81..1940247ef 100644 --- a/internal/controller/apiextensions/composite/composition_transforms.go +++ b/internal/controller/apiextensions/composite/composition_transforms.go @@ -419,7 +419,8 @@ func ResolveConvert(t v1.ConvertTransform, input any) (any, error) { if err != nil { return nil, err } - return f(input) + out, err := f(input) + return out, errors.Wrapf(err, "cannot convert value %s", input) } type conversionPair struct { @@ -456,63 +457,123 @@ func GetConversionFunc(t *v1.ConvertTransform, from v1.TransformIOType) (func(an // may return an error. 
var conversions = map[conversionPair]func(any) (any, error){ {from: v1.TransformIOTypeString, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { - return strconv.ParseInt(i.(string), 10, 64) + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } + return strconv.ParseInt(s, 10, 64) }, {from: v1.TransformIOTypeString, to: v1.TransformIOTypeBool, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { - return strconv.ParseBool(i.(string)) + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } + return strconv.ParseBool(s) }, {from: v1.TransformIOTypeString, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { - return strconv.ParseFloat(i.(string), 64) + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } + return strconv.ParseFloat(s, 64) }, {from: v1.TransformIOTypeString, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatQuantity}: func(i any) (any, error) { - q, err := resource.ParseQuantity(i.(string)) + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } + q, err := resource.ParseQuantity(s) if err != nil { return nil, err } return q.AsApproximateFloat64(), nil }, - {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - return strconv.FormatInt(i.(int64), 10), nil + {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + i64, ok := i.(int64) + if !ok { + return nil, errors.New("not an int64") + } + return strconv.FormatInt(i64, 10), nil }, - {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeBool, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - return i.(int64) == 1, nil + {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeBool, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + i64, ok := i.(int64) + if !ok { + return nil, errors.New("not an int64") + } + return i64 == 1, nil }, - {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - return float64(i.(int64)), nil + {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + i64, ok := i.(int64) + if !ok { + return nil, errors.New("not an int64") + } + return float64(i64), nil }, - {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - return strconv.FormatBool(i.(bool)), nil + {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + b, ok := i.(bool) + if !ok { + return nil, errors.New("not a bool") + } + return strconv.FormatBool(b), nil }, - {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. 
- if i.(bool) { + {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + b, ok := i.(bool) + if !ok { + return nil, errors.New("not a bool") + } + if b { return int64(1), nil } return int64(0), nil }, - {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - if i.(bool) { + {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + b, ok := i.(bool) + if !ok { + return nil, errors.New("not a bool") + } + if b { return float64(1), nil } return float64(0), nil }, - {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - return strconv.FormatFloat(i.(float64), 'f', -1, 64), nil + {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + f64, ok := i.(float64) + if !ok { + return nil, errors.New("not a float64") + } + return strconv.FormatFloat(f64, 'f', -1, 64), nil }, - {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - return int64(i.(float64)), nil + {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + f64, ok := i.(float64) + if !ok { + return nil, errors.New("not a float64") + } + return int64(f64), nil }, - {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeBool, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. 
- return i.(float64) == float64(1), nil + {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeBool, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + f64, ok := i.(float64) + if !ok { + return nil, errors.New("not a float64") + } + return f64 == float64(1), nil }, {from: v1.TransformIOTypeString, to: v1.TransformIOTypeObject, format: v1.ConvertTransformFormatJSON}: func(i any) (any, error) { + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } o := map[string]any{} - return o, json.Unmarshal([]byte(i.(string)), &o) + return o, json.Unmarshal([]byte(s), &o) }, {from: v1.TransformIOTypeString, to: v1.TransformIOTypeArray, format: v1.ConvertTransformFormatJSON}: func(i any) (any, error) { + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } var o []any - return o, json.Unmarshal([]byte(i.(string)), &o) + return o, json.Unmarshal([]byte(s), &o) }, } diff --git a/internal/controller/apiextensions/composite/composition_transforms_test.go b/internal/controller/apiextensions/composite/composition_transforms_test.go index 228e3311d..d00e22231 100644 --- a/internal/controller/apiextensions/composite/composition_transforms_test.go +++ b/internal/controller/apiextensions/composite/composition_transforms_test.go @@ -1127,7 +1127,7 @@ func TestConvertResolve(t *testing.T) { format: (*v1.ConvertTransformFormat)(ptr.To(string(v1.ConvertTransformFormatQuantity))), }, want: want{ - err: resource.ErrFormatWrong, + err: errors.Wrap(resource.ErrFormatWrong, "cannot convert value 1000 blabla"), }, }, "SameTypeNoOp": { diff --git a/internal/controller/apiextensions/composite/environment_selector.go b/internal/controller/apiextensions/composite/environment_selector.go index f5bc2991f..0eff504e6 100644 --- a/internal/controller/apiextensions/composite/environment_selector.go +++ b/internal/controller/apiextensions/composite/environment_selector.go @@ -231,11 +231,11 @@ func sortConfigs(ec []v1alpha1.EnvironmentConfig, f string) error { vali, valj := p[i].val, p[j].val switch valsKind { //nolint:exhaustive // we only support these types case reflect.Float64: - return vali.(float64) < valj.(float64) + return vali.(float64) < valj.(float64) //nolint:forcetypeassert // Checked by reflect. case reflect.Int64: - return vali.(int64) < valj.(int64) + return vali.(int64) < valj.(int64) //nolint:forcetypeassert // Checked by reflect. case reflect.String: - return vali.(string) < valj.(string) + return vali.(string) < valj.(string) //nolint:forcetypeassert // Checked by reflect. default: // should never happen err = errors.Errorf(errFmtSortUnknownType, valsKind) diff --git a/internal/controller/apiextensions/definition/composed.go b/internal/controller/apiextensions/definition/composed.go index 0f3f108ea..3abd6c1b0 100644 --- a/internal/controller/apiextensions/definition/composed.go +++ b/internal/controller/apiextensions/definition/composed.go @@ -165,8 +165,8 @@ func (i *composedResourceInformers) WatchComposedResources(gvks ...schema.GroupV if _, err := inf.AddEventHandler(kcache.ResourceEventHandlerFuncs{ UpdateFunc: func(oldObj, newObj interface{}) { - old := oldObj.(client.Object) - obj := newObj.(client.Object) + old := oldObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. + obj := newObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. 
if old.GetResourceVersion() == obj.GetResourceVersion() { return } diff --git a/internal/controller/apiextensions/usage/reconciler.go b/internal/controller/apiextensions/usage/reconciler.go index 580d004a9..f34edfb40 100644 --- a/internal/controller/apiextensions/usage/reconciler.go +++ b/internal/controller/apiextensions/usage/reconciler.go @@ -452,6 +452,7 @@ func RespectOwnerRefs() xpresource.ApplyOption { // This is a Usage resource, so we need to respect existing owner // references in case it has any. if len(cu.GetOwnerReferences()) > 0 { + //nolint:forcetypeassert // This will always be a metav1.Object. desired.(metav1.Object).SetOwnerReferences(cu.GetOwnerReferences()) } return nil diff --git a/internal/controller/pkg/manager/reconciler.go b/internal/controller/pkg/manager/reconciler.go index d65f8866e..0fef6dabc 100644 --- a/internal/controller/pkg/manager/reconciler.go +++ b/internal/controller/pkg/manager/reconciler.go @@ -418,12 +418,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco pr.SetSkipDependencyResolution(p.GetSkipDependencyResolution()) pr.SetCommonLabels(p.GetCommonLabels()) - if pwr, ok := p.(v1.PackageWithRuntime); ok { - pwrr := pr.(v1.PackageRevisionWithRuntime) - pwrr.SetRuntimeConfigRef(pwr.GetRuntimeConfigRef()) - pwrr.SetControllerConfigRef(pwr.GetControllerConfigRef()) - pwrr.SetTLSServerSecretName(pwr.GetTLSServerSecretName()) - pwrr.SetTLSClientSecretName(pwr.GetTLSClientSecretName()) + pwr, pwok := p.(v1.PackageWithRuntime) + prwr, prok := pr.(v1.PackageRevisionWithRuntime) + if pwok && prok { + prwr.SetRuntimeConfigRef(pwr.GetRuntimeConfigRef()) + prwr.SetControllerConfigRef(pwr.GetControllerConfigRef()) + prwr.SetTLSServerSecretName(pwr.GetTLSServerSecretName()) + prwr.SetTLSClientSecretName(pwr.GetTLSClientSecretName()) } // If current revision is not active, and we have an automatic or diff --git a/internal/controller/pkg/revision/establisher.go b/internal/controller/pkg/revision/establisher.go index 47c1c2718..cac651785 100644 --- a/internal/controller/pkg/revision/establisher.go +++ b/internal/controller/pkg/revision/establisher.go @@ -239,7 +239,7 @@ func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, pa g.Go(func() error { // Assert desired object to resource.Object so that we can access its // metadata. - d, ok := res.(resource.Object) + desired, ok := res.(resource.Object) if !ok { return errors.New(errAssertResourceObj) } @@ -257,7 +257,7 @@ func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, pa if !ok { return errors.New(errAssertClientObj) } - err := e.client.Get(ctx, types.NamespacedName{Name: d.GetName(), Namespace: d.GetNamespace()}, current) + err := e.client.Get(ctx, types.NamespacedName{Name: desired.GetName(), Namespace: desired.GetNamespace()}, current) if resource.IgnoreNotFound(err) != nil { return err } @@ -268,26 +268,25 @@ func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, pa // We will not create a resource if we are not going to control it, // so we don't need to check with dry run. if control { - if err := e.create(ctx, d, parent, client.DryRunAll); err != nil { + if err := e.create(ctx, desired, parent, client.DryRunAll); err != nil { return err } } // Add to objects as not existing. 
select { - case out <- currentDesired{Desired: d, Current: nil, Exists: false}: + case out <- currentDesired{Desired: desired, Current: nil, Exists: false}: return nil case <-ctx.Done(): return ctx.Err() } } - c := current.(resource.Object) - if err := e.update(ctx, c, d, parent, control, client.DryRunAll); err != nil { + if err := e.update(ctx, current, desired, parent, control, client.DryRunAll); err != nil { return err } // Add to objects as existing. select { - case out <- currentDesired{Desired: d, Current: c, Exists: true}: + case out <- currentDesired{Desired: desired, Current: current, Exists: true}: return nil case <-ctx.Done(): return ctx.Err() diff --git a/internal/controller/pkg/revision/reconciler.go b/internal/controller/pkg/revision/reconciler.go index c08f96cb9..3b59a6584 100644 --- a/internal/controller/pkg/revision/reconciler.go +++ b/internal/controller/pkg/revision/reconciler.go @@ -548,9 +548,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } var runtimeManifestBuilder ManifestBuilder - if r.runtimeHook != nil { - pwr := pr.(v1.PackageRevisionWithRuntime) - + pwr, hasRuntime := pr.(v1.PackageRevisionWithRuntime) + if hasRuntime && r.runtimeHook != nil { opts, err := r.runtimeManifestBuilderOptions(ctx, pwr) if err != nil { log.Debug(errManifestBuilderOptions, "error", err) @@ -743,7 +742,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco pkgMeta, _ := xpkg.TryConvert(pkg.GetMeta()[0], &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1beta1.Function{}) - pmo := pkgMeta.(metav1.Object) + pmo := pkgMeta.(metav1.Object) //nolint:forcetypeassert // Will always be metav1.Object. meta.AddLabels(pr, pmo.GetLabels()) meta.AddAnnotations(pr, pmo.GetAnnotations()) if err := r.client.Update(ctx, pr); err != nil { @@ -796,8 +795,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } } - if r.runtimeHook != nil { - pwr := pr.(v1.PackageRevisionWithRuntime) + if hasRuntime && r.runtimeHook != nil { if err := r.runtimeHook.Pre(ctx, pkgMeta, pwr, runtimeManifestBuilder); err != nil { if kerrors.IsConflict(err) { return reconcile.Result{Requeue: true}, nil @@ -846,8 +844,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco }) pr.SetObjects(refs) - if r.runtimeHook != nil { - if err := r.runtimeHook.Post(ctx, pkgMeta, pr.(v1.PackageRevisionWithRuntime), runtimeManifestBuilder); err != nil { + if hasRuntime && r.runtimeHook != nil { + if err := r.runtimeHook.Post(ctx, pkgMeta, pwr, runtimeManifestBuilder); err != nil { if kerrors.IsConflict(err) { return reconcile.Result{Requeue: true}, nil } @@ -882,12 +880,13 @@ func (r *Reconciler) deactivateRevision(ctx context.Context, pr v1.PackageRevisi return errors.Wrap(err, errReleaseObjects) } - if r.runtimeHook == nil { + prwr, ok := pr.(v1.PackageRevisionWithRuntime) + if !ok || r.runtimeHook == nil { return nil } // Call deactivation hook. 
- if err := r.runtimeHook.Deactivate(ctx, pr.(v1.PackageRevisionWithRuntime), runtimeManifestBuilder); err != nil { + if err := r.runtimeHook.Deactivate(ctx, prwr, runtimeManifestBuilder); err != nil { return errors.Wrap(err, errDeactivationHook) } diff --git a/internal/controller/pkg/revision/runtime_function.go b/internal/controller/pkg/revision/runtime_function.go index b4b0359de..b0bed8439 100644 --- a/internal/controller/pkg/revision/runtime_function.go +++ b/internal/controller/pkg/revision/runtime_function.go @@ -91,7 +91,10 @@ func (h *FunctionHooks) Pre(ctx context.Context, _ runtime.Object, pr v1.Package } // N.B.: We expect the revision to be applied by the caller - fRev := pr.(*v1beta1.FunctionRevision) + fRev, ok := pr.(*v1beta1.FunctionRevision) + if !ok { + return errors.Errorf("cannot apply function package hooks to %T", pr) + } fRev.Status.Endpoint = fmt.Sprintf(serviceEndpointFmt, svc.Name, svc.Namespace, servicePort) secServer := build.TLSServerSecret() diff --git a/internal/controller/rbac/definition/reconciler.go b/internal/controller/rbac/definition/reconciler.go index fef32ff2e..6667bc0a6 100644 --- a/internal/controller/rbac/definition/reconciler.go +++ b/internal/controller/rbac/definition/reconciler.go @@ -222,7 +222,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // ClusterRoles. We consider ClusterRoles to be different if their labels and // rules do not match. func ClusterRolesDiffer(current, desired runtime.Object) bool { - c := current.(*rbacv1.ClusterRole) - d := desired.(*rbacv1.ClusterRole) + // Calling this with anything but ClusterRoles is a programming error. If it + // happens, we probably do want to panic. + c := current.(*rbacv1.ClusterRole) //nolint:forcetypeassert // See above. + d := desired.(*rbacv1.ClusterRole) //nolint:forcetypeassert // See above. return !cmp.Equal(c.GetLabels(), d.GetLabels()) || !cmp.Equal(c.Rules, d.Rules) } diff --git a/internal/controller/rbac/namespace/reconciler.go b/internal/controller/rbac/namespace/reconciler.go index fdeecb332..2b30bb389 100644 --- a/internal/controller/rbac/namespace/reconciler.go +++ b/internal/controller/rbac/namespace/reconciler.go @@ -229,8 +229,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // RolesDiffer returns true if the supplied objects are different Roles. We // consider Roles to be different if their crossplane annotations or rules do not match. func RolesDiffer(current, desired runtime.Object) bool { - c := current.(*rbacv1.Role) - d := desired.(*rbacv1.Role) + // Calling this with anything but Roles is a programming error. If it + // happens, we probably do want to panic. + c := current.(*rbacv1.Role) //nolint:forcetypeassert // See above. + d := desired.(*rbacv1.Role) //nolint:forcetypeassert // See above. return !equalRolesAnnotations(c, d) || !cmp.Equal(c.Rules, d.Rules) } diff --git a/internal/controller/rbac/provider/binding/reconciler.go b/internal/controller/rbac/provider/binding/reconciler.go index 30c9cea79..ef3758521 100644 --- a/internal/controller/rbac/provider/binding/reconciler.go +++ b/internal/controller/rbac/provider/binding/reconciler.go @@ -241,7 +241,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // consider ClusterRoleBindings to be different if the subjects, the roleRefs, or the owner ref // is different. 
func ClusterRoleBindingsDiffer(current, desired runtime.Object) bool { - c := current.(*rbacv1.ClusterRoleBinding) - d := desired.(*rbacv1.ClusterRoleBinding) + // Calling this with anything but ClusterRoleBindings is a programming + // error. If it happens, we probably do want to panic. + c := current.(*rbacv1.ClusterRoleBinding) //nolint:forcetypeassert // See above. + d := desired.(*rbacv1.ClusterRoleBinding) //nolint:forcetypeassert // See above. return !cmp.Equal(c.Subjects, d.Subjects) || !cmp.Equal(c.RoleRef, d.RoleRef) || !cmp.Equal(c.GetOwnerReferences(), d.GetOwnerReferences()) } diff --git a/internal/controller/rbac/provider/roles/reconciler.go b/internal/controller/rbac/provider/roles/reconciler.go index 97e34020a..2970620fd 100644 --- a/internal/controller/rbac/provider/roles/reconciler.go +++ b/internal/controller/rbac/provider/roles/reconciler.go @@ -399,8 +399,10 @@ func DefinedResources(refs []xpv1.TypedReference) []Resource { // ClusterRoles. We consider ClusterRoles to be different if their labels and // rules do not match. func ClusterRolesDiffer(current, desired runtime.Object) bool { - c := current.(*rbacv1.ClusterRole) - d := desired.(*rbacv1.ClusterRole) + // Calling this with anything but ClusterRoles is a programming error. If it + // happens, we probably do want to panic. + c := current.(*rbacv1.ClusterRole) //nolint:forcetypeassert // See above. + d := desired.(*rbacv1.ClusterRole) //nolint:forcetypeassert // See above. return !cmp.Equal(c.GetLabels(), d.GetLabels()) || !cmp.Equal(c.Rules, d.Rules) } diff --git a/internal/initializer/webhook_configurations.go b/internal/initializer/webhook_configurations.go index 1e5b9bd4f..d126f2e01 100644 --- a/internal/initializer/webhook_configurations.go +++ b/internal/initializer/webhook_configurations.go @@ -132,7 +132,7 @@ func (c *WebhookConfigurations) Run(ctx context.Context, kube client.Client) err default: return errors.Errorf("only MutatingWebhookConfiguration and ValidatingWebhookConfiguration kinds are accepted, got %T", obj) } - if err := pa.Apply(ctx, obj.(client.Object)); err != nil { + if err := pa.Apply(ctx, obj.(client.Object)); err != nil { //nolint:forcetypeassert // Should always be a client.Object. return errors.Wrap(err, errApplyWebhookConfiguration) } } diff --git a/internal/usage/handler.go b/internal/usage/handler.go index 3bc59d576..6017f01bf 100644 --- a/internal/usage/handler.go +++ b/internal/usage/handler.go @@ -68,7 +68,7 @@ func indexValue(apiVersion, kind, name string) string { func SetupWebhookWithManager(mgr ctrl.Manager, options controller.Options) error { indexer := mgr.GetFieldIndexer() if err := indexer.IndexField(context.Background(), &v1alpha1.Usage{}, InUseIndexKey, func(obj client.Object) []string { - u := obj.(*v1alpha1.Usage) + u := obj.(*v1alpha1.Usage) //nolint:forcetypeassert // Will always be a Usage. if u.Spec.Of.ResourceRef == nil || len(u.Spec.Of.ResourceRef.Name) == 0 { return []string{} } diff --git a/internal/validation/apiextensions/v1/composition/handler.go b/internal/validation/apiextensions/v1/composition/handler.go index 7acc4bb4c..66984b7a0 100644 --- a/internal/validation/apiextensions/v1/composition/handler.go +++ b/internal/validation/apiextensions/v1/composition/handler.go @@ -60,7 +60,7 @@ func SetupWebhookWithManager(mgr ctrl.Manager, options controller.Options) error // The index is used by the getCRD function below. 
indexer := mgr.GetFieldIndexer() if err := indexer.IndexField(context.Background(), &extv1.CustomResourceDefinition{}, crdsIndexKey, func(obj client.Object) []string { - return []string{getIndexValueForCRD(obj.(*extv1.CustomResourceDefinition))} + return []string{getIndexValueForCRD(obj.(*extv1.CustomResourceDefinition))} //nolint:forcetypeassert // Will always be a CRD. }); err != nil { return err } diff --git a/internal/xpkg/fetch.go b/internal/xpkg/fetch.go index 4963d382a..9ae40b97b 100644 --- a/internal/xpkg/fetch.go +++ b/internal/xpkg/fetch.go @@ -108,9 +108,13 @@ func WithServiceAccount(sa string) FetcherOpt { // NewK8sFetcher creates a new K8sFetcher. func NewK8sFetcher(client kubernetes.Interface, opts ...FetcherOpt) (*K8sFetcher, error) { + dt, ok := remote.DefaultTransport.(*http.Transport) + if !ok { + return nil, errors.Errorf("default transport was not a %T", &http.Transport{}) + } k := &K8sFetcher{ client: client, - transport: remote.DefaultTransport.(*http.Transport).Clone(), + transport: dt.Clone(), } for _, o := range opts { diff --git a/pkg/validation/apiextensions/v1/composition/patches.go b/pkg/validation/apiextensions/v1/composition/patches.go index 0fe2d8bfb..6768096c7 100644 --- a/pkg/validation/apiextensions/v1/composition/patches.go +++ b/pkg/validation/apiextensions/v1/composition/patches.go @@ -526,7 +526,7 @@ func GetBaseObject(ct *v1.ComposedTemplate) (client.Object, error) { ct.Base.Object = cd } if ct, ok := ct.Base.Object.(client.Object); ok { - return ct.DeepCopyObject().(client.Object), nil + return ct.DeepCopyObject().(client.Object), nil //nolint:forcetypeassert // Deepcopy will always be the same type. } return nil, errors.New("base object is not a client.Object") } From 3b0262e4975b124cd4761022cf7414aa189b1d8e Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 23:05:23 -0800 Subject: [PATCH 028/370] Enable gochecknoglobals linter We often use global variables to represent data that is by convention constant, but that can't be constant because it's mutable. So there's a few nolint comments for this, but I think it's still good to discourage folks. Signed-off-by: Nic Cope --- .golangci.yml | 3 ++- cmd/crank/beta/convert/deploymentruntime/converter.go | 7 ++----- cmd/crank/main.go | 6 +++--- cmd/crossplane/core/core.go | 4 ++-- cmd/crossplane/main.go | 4 ++-- cmd/crossplane/rbac/rbac.go | 2 +- .../apiextensions/composite/composition_transforms.go | 2 +- internal/controller/pkg/revision/runtime.go | 1 + internal/controller/rbac/definition/roles.go | 1 + internal/controller/rbac/provider/roles/roles.go | 3 +++ internal/initializer/cert_generator.go | 2 +- internal/metrics/metrics.go | 6 +++++- internal/transport/transport.go | 4 +++- internal/xcrd/schemas.go | 4 ++-- test/e2e/funcs/feature.go | 2 +- 15 files changed, 30 insertions(+), 21 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index a3aa7cf8c..844fd8a7b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -43,7 +43,6 @@ linters: - inamedparam - gomnd - interfacebloat - - gochecknoglobals # Below are linters that lint for things we don't value. Each entry below # this line must have a comment explaining the rationale. @@ -221,6 +220,7 @@ issues: - scopelint - unparam - gochecknoinits + - gochecknoglobals - containedctx - forcetypeassert @@ -235,6 +235,7 @@ issues: - path: apis/ linters: - gochecknoinits + - gochecknoglobals # These are performance optimisations rather than style issues per se. 
# They warn when function arguments or range values copy a lot of memory diff --git a/cmd/crank/beta/convert/deploymentruntime/converter.go b/cmd/crank/beta/convert/deploymentruntime/converter.go index ea26129ac..f5efb558b 100644 --- a/cmd/crank/beta/convert/deploymentruntime/converter.go +++ b/cmd/crank/beta/convert/deploymentruntime/converter.go @@ -18,7 +18,6 @@ package deploymentruntime import ( "errors" - "time" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -36,8 +35,6 @@ const ( errNilControllerConfig = "ControllerConfig is nil" ) -var timeNow = time.Now() - // controllerConfigToDeploymentRuntimeConfig converts a ControllerConfig to // a DeploymentRuntimeConfig. func controllerConfigToDeploymentRuntimeConfig(cc *v1alpha1.ControllerConfig) (*v1beta1.DeploymentRuntimeConfig, error) { @@ -49,7 +46,7 @@ func controllerConfigToDeploymentRuntimeConfig(cc *v1alpha1.ControllerConfig) (* withName(cc.Name), // set the creation timestamp due to https://github.com/kubernetes/kubernetes/issues/109427 // to be removed when fixed. k8s apply ignores this field - withCreationTimestamp(metav1.NewTime(timeNow)), + withCreationTimestamp(metav1.Now()), withServiceAccountTemplate(cc), withServiceTemplate(cc), withDeploymentTemplate(dt), @@ -80,7 +77,7 @@ func deploymentTemplateFromControllerConfig(cc *v1alpha1.ControllerConfig) *v1be // set the creation timestamp due to https://github.com/kubernetes/kubernetes/issues/109427 // to be removed when fixed. k8s apply ignores this field if cc.CreationTimestamp.IsZero() || dt.Spec.Template.ObjectMeta.CreationTimestamp.IsZero() { - dt.Spec.Template.ObjectMeta.CreationTimestamp = metav1.NewTime(timeNow) + dt.Spec.Template.ObjectMeta.CreationTimestamp = metav1.Now() } if cc.Spec.Metadata != nil { diff --git a/cmd/crank/main.go b/cmd/crank/main.go index 928ac155f..f5c13e55f 100644 --- a/cmd/crank/main.go +++ b/cmd/crank/main.go @@ -30,7 +30,7 @@ import ( "github.com/crossplane/crossplane/internal/version" ) -var _ = kong.Must(&cli) +var _ = kong.Must(&cli{}) type ( versionFlag string @@ -58,7 +58,7 @@ func (v verboseFlag) BeforeApply(ctx *kong.Context) error { //nolint:unparam // } // The top-level crossplane CLI. -var cli struct { +type cli struct { // Subcommands and flags will appear in the CLI help output in the same // order they're specified here. Keep them in alphabetical order. @@ -76,7 +76,7 @@ var cli struct { func main() { logger := logging.NewNopLogger() - ctx := kong.Parse(&cli, + ctx := kong.Parse(&cli{}, kong.Name("crossplane"), kong.Description("A command line tool for interacting with Crossplane."), // Binding a variable to kong context makes it available to all commands diff --git a/cmd/crossplane/core/core.go b/cmd/crossplane/core/core.go index 5196fa48c..358ffafac 100644 --- a/cmd/crossplane/core/core.go +++ b/cmd/crossplane/core/core.go @@ -68,9 +68,9 @@ type Command struct { // KongVars represent the kong variables associated with the CLI parser // required for the Registry default variable interpolation. -var KongVars = kong.Vars{ +var KongVars = kong.Vars{ //nolint:gochecknoglobals // We treat these as constants. 
"default_registry": xpkg.DefaultRegistry, - "default_user_agent": transport.DefaultUserAgent, + "default_user_agent": transport.DefaultUserAgent(), } // Run is the no-op method required for kong call tree diff --git a/cmd/crossplane/main.go b/cmd/crossplane/main.go index 1ac5e5500..c1a3f586e 100644 --- a/cmd/crossplane/main.go +++ b/cmd/crossplane/main.go @@ -46,7 +46,7 @@ type ( versionFlag bool ) -var cli struct { +type cli struct { Debug debugFlag `help:"Print verbose logging statements." short:"d"` Version versionFlag `help:"Print version and quit." short:"v"` @@ -97,7 +97,7 @@ func main() { // objects. s := runtime.NewScheme() - ctx := kong.Parse(&cli, + ctx := kong.Parse(&cli{}, kong.Name("crossplane"), kong.Description("An open source multicloud control plane."), kong.BindTo(logging.NewLogrLogger(zl), (*logging.Logger)(nil)), diff --git a/cmd/crossplane/rbac/rbac.go b/cmd/crossplane/rbac/rbac.go index 671918435..db170b100 100644 --- a/cmd/crossplane/rbac/rbac.go +++ b/cmd/crossplane/rbac/rbac.go @@ -45,7 +45,7 @@ const ( // KongVars represent the kong variables associated with the CLI parser // required for the RBAC enum interpolation. -var KongVars = kong.Vars{ +var KongVars = kong.Vars{ //nolint:gochecknoglobals // We treat these as constants. "rbac_manage_default_var": ManagementPolicyBasic, "rbac_manage_enum_var": strings.Join( []string{ diff --git a/internal/controller/apiextensions/composite/composition_transforms.go b/internal/controller/apiextensions/composite/composition_transforms.go index 1940247ef..5cbac5256 100644 --- a/internal/controller/apiextensions/composite/composition_transforms.go +++ b/internal/controller/apiextensions/composite/composition_transforms.go @@ -455,7 +455,7 @@ func GetConversionFunc(t *v1.ConvertTransform, from v1.TransformIOType) (func(an // The unparam linter is complaining that these functions always return a nil // error, but we need this to be the case given some other functions in the map // may return an error. -var conversions = map[conversionPair]func(any) (any, error){ +var conversions = map[conversionPair]func(any) (any, error){ //nolint:gochecknoglobals // We treat this map as a constant. {from: v1.TransformIOTypeString, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { s, ok := i.(string) if !ok { diff --git a/internal/controller/pkg/revision/runtime.go b/internal/controller/pkg/revision/runtime.go index ef123430d..50e5f3f58 100644 --- a/internal/controller/pkg/revision/runtime.go +++ b/internal/controller/pkg/revision/runtime.go @@ -58,6 +58,7 @@ const ( tlsClientCertsDir = "/tls/client" ) +//nolint:gochecknoglobals // We treat these as constants, but take their addresses. var ( runAsUser = int64(2000) runAsGroup = int64(2000) diff --git a/internal/controller/rbac/definition/roles.go b/internal/controller/rbac/definition/roles.go index 9fc5ddf28..8de8dbbb0 100644 --- a/internal/controller/rbac/definition/roles.go +++ b/internal/controller/rbac/definition/roles.go @@ -53,6 +53,7 @@ const ( suffixFinalizers = "/finalizers" ) +//nolint:gochecknoglobals // We treat these as constants. 
var ( verbsEdit = []string{rbacv1.VerbAll} verbsView = []string{"get", "list", "watch"} diff --git a/internal/controller/rbac/provider/roles/roles.go b/internal/controller/rbac/provider/roles/roles.go index e39bb92d1..f3a83c64b 100644 --- a/internal/controller/rbac/provider/roles/roles.go +++ b/internal/controller/rbac/provider/roles/roles.go @@ -51,6 +51,7 @@ const ( pluralLeases = "leases" ) +//nolint:gochecknoglobals // We treat these as constants. var ( verbsEdit = []string{rbacv1.VerbAll} verbsView = []string{"get", "list", "watch"} @@ -66,6 +67,8 @@ var ( // * ConfigMaps for leader election. // * Leases for leader election. // * Events for debugging. +// +//nolint:gochecknoglobals // We treat this as a constant. var rulesSystemExtra = []rbacv1.PolicyRule{ { APIGroups: []string{"", coordinationv1.GroupName}, diff --git a/internal/initializer/cert_generator.go b/internal/initializer/cert_generator.go index e187a8f2e..173480da6 100644 --- a/internal/initializer/cert_generator.go +++ b/internal/initializer/cert_generator.go @@ -43,7 +43,7 @@ type CertificateGenerator interface { Generate(*x509.Certificate, *CertificateSigner) (key []byte, crt []byte, err error) } -var pkixName = pkix.Name{ +var pkixName = pkix.Name{ //nolint:gochecknoglobals // We treat this as a constant. CommonName: "Crossplane", Organization: []string{"Crossplane"}, Country: []string{"Earth"}, diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go index 24ff7e8fa..35a49d7c3 100644 --- a/internal/metrics/metrics.go +++ b/internal/metrics/metrics.go @@ -18,7 +18,11 @@ package metrics import "sigs.k8s.io/controller-runtime/pkg/metrics" +// TODO(negz): Should we try to plumb the metrics registry down to all callers? +// I think this would be a good practice - similar to how we plumb the logger. +// On the other hand, using a global metrics registry is idiomatic for Prom. + // Registry is a Prometheus metrics registry. All Crossplane metrics should be // registered with it. Crossplane adds metrics to the registry created and // served by controller-runtime. -var Registry = metrics.Registry +var Registry = metrics.Registry //nolint:gochecknoglobals // See TODO above. diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 32d98198d..f95d296be 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -26,7 +26,9 @@ import ( // DefaultUserAgent is the default User-Agent header that is set when making // HTTP requests for packages. -var DefaultUserAgent = fmt.Sprintf("%s/%s", "crossplane", version.New().GetVersionString()) +func DefaultUserAgent() string { + return fmt.Sprintf("%s/%s", "crossplane", version.New().GetVersionString()) +} // UserAgent wraps a RoundTripper and injects a user agent header. type UserAgent struct { diff --git a/internal/xcrd/schemas.go b/internal/xcrd/schemas.go index c54fded53..d8fb00917 100644 --- a/internal/xcrd/schemas.go +++ b/internal/xcrd/schemas.go @@ -29,11 +29,11 @@ const ( ) // CompositionRevisionRef should be propagated dynamically. -var CompositionRevisionRef = "compositionRevisionRef" +const CompositionRevisionRef = "compositionRevisionRef" // PropagateSpecProps is the list of XRC spec properties to propagate // when translating an XRC into an XR. 
-var PropagateSpecProps = []string{"compositionRef", "compositionSelector", "compositionUpdatePolicy", "compositionRevisionSelector"} +var PropagateSpecProps = []string{"compositionRef", "compositionSelector", "compositionUpdatePolicy", "compositionRevisionSelector"} //nolint:gochecknoglobals // We treat this as a constant. // TODO(negz): Add descriptions to schema fields. diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go index b5a2deff8..d7ac1b024 100644 --- a/test/e2e/funcs/feature.go +++ b/test/e2e/funcs/feature.go @@ -306,7 +306,7 @@ func (nf notFound) String() string { return "NotFound" } // NotFound is a special 'want' value that indicates the supplied path should // not be found. -var NotFound = notFound{} +var NotFound = notFound{} //nolint:gochecknoglobals // We treat this as a constant. // ResourcesHaveFieldValueWithin fails a test if the supplied resources do not // have the supplied value at the supplied field path within the supplied From 04c770c1ce09b8205c60ad5e9c621c44417494d0 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 23:14:46 -0800 Subject: [PATCH 029/370] Enable interfacebloat linter Signed-off-by: Nic Cope --- .golangci.yml | 4 +++- apis/pkg/v1/interfaces.go | 8 ++++---- internal/dag/dag.go | 2 +- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 844fd8a7b..184e6ebbe 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -42,7 +42,6 @@ linters: - nilnil - inamedparam - gomnd - - interfacebloat # Below are linters that lint for things we don't value. Each entry below # this line must have a comment explaining the rationale. @@ -208,6 +207,9 @@ linters-settings: - pkg: github.com/onsi/gomega desc: "See https://go.dev/wiki/TestComments#assert-libraries" + interfacebloat: + max: 5 + issues: # Excluding configuration per-path and per-linter exclude-rules: diff --git a/apis/pkg/v1/interfaces.go b/apis/pkg/v1/interfaces.go index d3a1b2f8e..eb7c09d59 100644 --- a/apis/pkg/v1/interfaces.go +++ b/apis/pkg/v1/interfaces.go @@ -63,7 +63,7 @@ func RefNames(refs []corev1.LocalObjectReference) []string { // PackageWithRuntime is the interface satisfied by packages with runtime types. // +k8s:deepcopy-gen=false -type PackageWithRuntime interface { +type PackageWithRuntime interface { //nolint:interfacebloat // TODO(negz): Could this be composed of smaller interfaces? Package GetControllerConfigRef() *ControllerConfigReference @@ -79,7 +79,7 @@ type PackageWithRuntime interface { // Package is the interface satisfied by package types. // +k8s:deepcopy-gen=false -type Package interface { +type Package interface { //nolint:interfacebloat // TODO(negz): Could we break this up into smaller, composable interfaces? resource.Object resource.Conditioned @@ -379,7 +379,7 @@ func (p *Configuration) SetCommonLabels(l map[string]string) { // PackageRevisionWithRuntime is the interface satisfied by revision of packages // with runtime types. // +k8s:deepcopy-gen=false -type PackageRevisionWithRuntime interface { +type PackageRevisionWithRuntime interface { //nolint:interfacebloat // TODO(negz): Could this be composed of smaller interfaces? PackageRevision GetControllerConfigRef() *ControllerConfigReference @@ -397,7 +397,7 @@ type PackageRevisionWithRuntime interface { // PackageRevision is the interface satisfied by package revision types. 
// +k8s:deepcopy-gen=false -type PackageRevision interface { +type PackageRevision interface { //nolint:interfacebloat // TODO(negz): Could we break this up into smaller, composable interfaces? resource.Object resource.Conditioned diff --git a/internal/dag/dag.go b/internal/dag/dag.go index 37d3e2ac5..dc8838a1b 100644 --- a/internal/dag/dag.go +++ b/internal/dag/dag.go @@ -33,7 +33,7 @@ type Node interface { } // DAG is a Directed Acyclic Graph. -type DAG interface { +type DAG interface { //nolint:interfacebloat // TODO(negz): Could this be several smaller interfaces? Init(nodes []Node) ([]Node, error) AddNode(Node) error AddNodes(...Node) error From d3f1b44b9b2d6e0361fe784617854674d06ba0ea Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 23:26:10 -0800 Subject: [PATCH 030/370] Enable inamedparam linter This linter warns for interface methods without named parameters. Signed-off-by: Nic Cope --- .golangci.yml | 1 - apis/pkg/v1/interfaces.go | 4 ++-- cmd/crank/beta/trace/internal/printer/printer.go | 2 +- cmd/crank/beta/validate/cache.go | 4 ++-- .../apiextensions/composite/composition_pt.go | 2 +- .../controller/apiextensions/composite/reconciler.go | 2 +- internal/controller/pkg/manager/revisioner.go | 2 +- internal/controller/pkg/revision/runtime.go | 6 +++--- internal/dag/dag.go | 10 +++++----- internal/initializer/cert_generator.go | 2 +- internal/xpkg/upbound/config/source.go | 2 +- 11 files changed, 18 insertions(+), 19 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 184e6ebbe..80c22baa0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -40,7 +40,6 @@ linters: - predeclared - ireturn - nilnil - - inamedparam - gomnd # Below are linters that lint for things we don't value. Each entry below diff --git a/apis/pkg/v1/interfaces.go b/apis/pkg/v1/interfaces.go index eb7c09d59..2b9500084 100644 --- a/apis/pkg/v1/interfaces.go +++ b/apis/pkg/v1/interfaces.go @@ -110,7 +110,7 @@ type Package interface { //nolint:interfacebloat // TODO(negz): Could we break t SetCurrentIdentifier(r string) GetSkipDependencyResolution() *bool - SetSkipDependencyResolution(*bool) + SetSkipDependencyResolution(skip *bool) GetCommonLabels() map[string]string SetCommonLabels(l map[string]string) @@ -425,7 +425,7 @@ type PackageRevision interface { //nolint:interfacebloat // TODO(negz): Could we SetRevision(r int64) GetSkipDependencyResolution() *bool - SetSkipDependencyResolution(*bool) + SetSkipDependencyResolution(skip *bool) GetDependencyStatus() (found, installed, invalid int64) SetDependencyStatus(found, installed, invalid int64) diff --git a/cmd/crank/beta/trace/internal/printer/printer.go b/cmd/crank/beta/trace/internal/printer/printer.go index 67b2143a6..9fbd68c60 100644 --- a/cmd/crank/beta/trace/internal/printer/printer.go +++ b/cmd/crank/beta/trace/internal/printer/printer.go @@ -43,7 +43,7 @@ const ( // Printer implements the interface which is used by all printers in this package. type Printer interface { - Print(io.Writer, *resource.Resource) error + Print(w io.Writer, r *resource.Resource) error } // New creates a new printer based on the specified type. diff --git a/cmd/crank/beta/validate/cache.go b/cmd/crank/beta/validate/cache.go index 2a19b035e..bf317cd5f 100644 --- a/cmd/crank/beta/validate/cache.go +++ b/cmd/crank/beta/validate/cache.go @@ -29,11 +29,11 @@ import ( // Cache defines an interface for caching schemas. 
type Cache interface { - Store([][]byte, string) error + Store(schemas [][]byte, path string) error Flush() error Init() error Load() ([]*unstructured.Unstructured, error) - Exists(string) (string, error) + Exists(image string) (string, error) } // LocalCache implements the Cache interface. diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index 4561534f3..6b3949ea6 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -410,7 +410,7 @@ func AssociateByOrder(t []v1.ComposedTemplate, r []corev1.ObjectReference) []Tem // A CompositionTemplateAssociator returns an array of template associations. type CompositionTemplateAssociator interface { - AssociateTemplates(context.Context, resource.Composite, []v1.ComposedTemplate) ([]TemplateAssociation, error) + AssociateTemplates(ctx context.Context, xr resource.Composite, cts []v1.ComposedTemplate) ([]TemplateAssociation, error) } // A CompositionTemplateAssociatorFn returns an array of template associations. diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index e85dfe4ad..bdeb0b1bf 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -350,7 +350,7 @@ type revision struct { // A CompositionRevisionValidator validates the supplied CompositionRevision. type CompositionRevisionValidator interface { - Validate(*v1.CompositionRevision) error + Validate(rev *v1.CompositionRevision) error } // A CompositionRevisionValidatorFn is a function that validates a diff --git a/internal/controller/pkg/manager/revisioner.go b/internal/controller/pkg/manager/revisioner.go index 4e5f90b84..7ed09cf8a 100644 --- a/internal/controller/pkg/manager/revisioner.go +++ b/internal/controller/pkg/manager/revisioner.go @@ -35,7 +35,7 @@ const ( // Revisioner extracts a revision name for a package source. type Revisioner interface { - Revision(context.Context, v1.Package) (string, error) + Revision(ctx context.Context, p v1.Package) (string, error) } // PackageRevisioner extracts a revision name for a package source. diff --git a/internal/controller/pkg/revision/runtime.go b/internal/controller/pkg/revision/runtime.go index 50e5f3f58..8b74113fd 100644 --- a/internal/controller/pkg/revision/runtime.go +++ b/internal/controller/pkg/revision/runtime.go @@ -85,13 +85,13 @@ type ManifestBuilder interface { // establishes objects. type RuntimeHooks interface { // Pre performs operations meant to happen before establishing objects. - Pre(context.Context, runtime.Object, v1.PackageRevisionWithRuntime, ManifestBuilder) error + Pre(ctx context.Context, obj runtime.Object, pr v1.PackageRevisionWithRuntime, b ManifestBuilder) error // Post performs operations meant to happen after establishing objects. - Post(context.Context, runtime.Object, v1.PackageRevisionWithRuntime, ManifestBuilder) error + Post(ctx context.Context, obj runtime.Object, pr v1.PackageRevisionWithRuntime, b ManifestBuilder) error // Deactivate performs operations meant to happen before deactivating a revision. - Deactivate(context.Context, v1.PackageRevisionWithRuntime, ManifestBuilder) error + Deactivate(ctx context.Context, pr v1.PackageRevisionWithRuntime, b ManifestBuilder) error } // RuntimeManifestBuilder builds the runtime manifests for a package revision. 
diff --git a/internal/dag/dag.go b/internal/dag/dag.go index dc8838a1b..f89eb64d2 100644 --- a/internal/dag/dag.go +++ b/internal/dag/dag.go @@ -29,15 +29,15 @@ type Node interface { // Node implementations should be careful to establish uniqueness of // neighbors in their AddNeighbors method or risk counting a neighbor // multiple times. - AddNeighbors(...Node) error + AddNeighbors(ns ...Node) error } // DAG is a Directed Acyclic Graph. type DAG interface { //nolint:interfacebloat // TODO(negz): Could this be several smaller interfaces? - Init(nodes []Node) ([]Node, error) - AddNode(Node) error - AddNodes(...Node) error - AddOrUpdateNodes(...Node) + Init(ns []Node) ([]Node, error) + AddNode(n Node) error + AddNodes(ns ...Node) error + AddOrUpdateNodes(ns ...Node) GetNode(identifier string) (Node, error) AddEdge(from string, to Node) (bool, error) AddEdges(edges map[string][]Node) ([]Node, error) diff --git a/internal/initializer/cert_generator.go b/internal/initializer/cert_generator.go index 173480da6..b162d345d 100644 --- a/internal/initializer/cert_generator.go +++ b/internal/initializer/cert_generator.go @@ -40,7 +40,7 @@ type CertificateSigner struct { // CertificateGenerator can return you TLS certificate valid for given domains. type CertificateGenerator interface { - Generate(*x509.Certificate, *CertificateSigner) (key []byte, crt []byte, err error) + Generate(c *x509.Certificate, cs *CertificateSigner) (key, crt []byte, err error) } var pkixName = pkix.Name{ //nolint:gochecknoglobals // We treat this as a constant. diff --git a/internal/xpkg/upbound/config/source.go b/internal/xpkg/upbound/config/source.go index 225cf9c5f..1dee522ac 100644 --- a/internal/xpkg/upbound/config/source.go +++ b/internal/xpkg/upbound/config/source.go @@ -29,7 +29,7 @@ import ( type Source interface { Initialize() error GetConfig() (*Config, error) - UpdateConfig(*Config) error + UpdateConfig(cfg *Config) error } // NewFSSource constructs a new FSSource. Path must be supplied via modifier or From ebc8367bf269006a407fbe68175d7d1740038c2e Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 23:30:02 -0800 Subject: [PATCH 031/370] Enable predeclared linter This linter warns for using predeclared names, like new or append. Signed-off-by: Nic Cope --- .golangci.yml | 1 - internal/validation/apiextensions/v1/xrd/handler.go | 12 ++++++------ internal/xpkg/upbound/config/config.go | 6 +++--- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 80c22baa0..f011cf47f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -37,7 +37,6 @@ linters: - paralleltest - nonamedreturns - wastedassign - - predeclared - ireturn - nilnil - gomnd diff --git a/internal/validation/apiextensions/v1/xrd/handler.go b/internal/validation/apiextensions/v1/xrd/handler.go index 1f3243c90..40b5ed9e9 100644 --- a/internal/validation/apiextensions/v1/xrd/handler.go +++ b/internal/validation/apiextensions/v1/xrd/handler.go @@ -104,23 +104,23 @@ func (v *validator) ValidateCreate(ctx context.Context, obj runtime.Object) (war } // ValidateUpdate implements the same logic as ValidateCreate. 
-func (v *validator) ValidateUpdate(ctx context.Context, old, new runtime.Object) (warns admission.Warnings, err error) { +func (v *validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warns admission.Warnings, err error) { // Validate the update - oldObj, ok := old.(*v1.CompositeResourceDefinition) + oldXRD, ok := oldObj.(*v1.CompositeResourceDefinition) if !ok { return nil, errors.New(errUnexpectedType) } - newObj, ok := new.(*v1.CompositeResourceDefinition) + newXRD, ok := newObj.(*v1.CompositeResourceDefinition) if !ok { return nil, errors.New(errUnexpectedType) } // Validate the update - validationWarns, validationErr := newObj.ValidateUpdate(oldObj) + validationWarns, validationErr := newXRD.ValidateUpdate(oldXRD) warns = append(warns, validationWarns...) if validationErr != nil { return validationWarns, validationErr.ToAggregate() } - crds, err := getAllCRDsForXRD(newObj) + crds, err := getAllCRDsForXRD(newXRD) if err != nil { return warns, xperrors.Wrap(err, "cannot get CRDs for CompositeResourceDefinition") } @@ -137,7 +137,7 @@ func (v *validator) ValidateUpdate(ctx context.Context, old, new runtime.Object) // which previously did not specify a claim. err := v.dryRunUpdateOrCreateIfNotFound(ctx, crd) if err != nil { - return warns, v.rewriteError(err, newObj, crd) + return warns, v.rewriteError(err, newXRD, crd) } } diff --git a/internal/xpkg/upbound/config/config.go b/internal/xpkg/upbound/config/config.go index 502d2dc29..6bac89c8d 100644 --- a/internal/xpkg/upbound/config/config.go +++ b/internal/xpkg/upbound/config/config.go @@ -139,14 +139,14 @@ func checkProfile(p Profile) error { } // AddOrUpdateUpboundProfile adds or updates an Upbound profile to the Config. -func (c *Config) AddOrUpdateUpboundProfile(name string, new Profile) error { - if err := checkProfile(new); err != nil { +func (c *Config) AddOrUpdateUpboundProfile(name string, p Profile) error { + if err := checkProfile(p); err != nil { return err } if c.Upbound.Profiles == nil { c.Upbound.Profiles = map[string]Profile{} } - c.Upbound.Profiles[name] = new + c.Upbound.Profiles[name] = p return nil } From 807e90b755e4dd0c15b34936f76cc128215ab20a Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 18 Feb 2024 23:32:57 -0800 Subject: [PATCH 032/370] Enable wasted assign linter This linter warns when you assign a value that will never be used to a variable. Signed-off-by: Nic Cope --- .golangci.yml | 1 - .../apiextensions/composite/composition_transforms.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index f011cf47f..bf66c6b88 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -36,7 +36,6 @@ linters: - stylecheck - paralleltest - nonamedreturns - - wastedassign - ireturn - nilnil - gomnd diff --git a/internal/controller/apiextensions/composite/composition_transforms.go b/internal/controller/apiextensions/composite/composition_transforms.go index 5cbac5256..62781536f 100644 --- a/internal/controller/apiextensions/composite/composition_transforms.go +++ b/internal/controller/apiextensions/composite/composition_transforms.go @@ -156,7 +156,7 @@ func resolveMathMultiply(t v1.MathTransform, input any) (any, error) { // is not a number. depending on the type of clamp, the result will be either // the input or the clamp value, preserving their original types. 
func resolveMathClamp(t v1.MathTransform, input any) (any, error) { - in := int64(0) + var in int64 switch i := input.(type) { case int: in = int64(i) From 5860c9a31838c9227ee2d679dd6a00bf4cb7e51d Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 19 Feb 2024 07:33:42 -0800 Subject: [PATCH 033/370] Enable stylecheck linter Signed-off-by: Nic Cope --- .golangci.yml | 1 - apis/apiextensions/v1/composition_webhooks.go | 6 ++--- apis/apiextensions/v1/xrd_types.go | 22 +++++++++---------- apis/pkg/meta/v1/interfaces.go | 8 +++---- apis/pkg/meta/v1beta1/conversion.go | 2 +- cmd/crank/beta/validate/manager.go | 5 ++--- test/e2e/consts.go | 17 ++++++++++++++ 7 files changed, 38 insertions(+), 23 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index bf66c6b88..72c865cee 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -33,7 +33,6 @@ linters: - thelper - testpackage - tagliatelle - - stylecheck - paralleltest - nonamedreturns - ireturn diff --git a/apis/apiextensions/v1/composition_webhooks.go b/apis/apiextensions/v1/composition_webhooks.go index c4b7310d0..80fda5572 100644 --- a/apis/apiextensions/v1/composition_webhooks.go +++ b/apis/apiextensions/v1/composition_webhooks.go @@ -50,12 +50,12 @@ var ( ) // GetSchemaAwareValidationMode returns the schema-aware validation mode set for the Composition. -func (in *Composition) GetSchemaAwareValidationMode() (CompositionValidationMode, error) { - if in.Annotations == nil { +func (c *Composition) GetSchemaAwareValidationMode() (CompositionValidationMode, error) { + if c.Annotations == nil { return DefaultSchemaAwareCompositionValidationMode, nil } - mode, ok := in.Annotations[SchemaAwareCompositionValidationModeAnnotation] + mode, ok := c.Annotations[SchemaAwareCompositionValidationModeAnnotation] if !ok { return DefaultSchemaAwareCompositionValidationMode, nil } diff --git a/apis/apiextensions/v1/xrd_types.go b/apis/apiextensions/v1/xrd_types.go index e30b9f789..24b005b5c 100644 --- a/apis/apiextensions/v1/xrd_types.go +++ b/apis/apiextensions/v1/xrd_types.go @@ -239,44 +239,44 @@ type CompositeResourceDefinitionList struct { // GetCompositeGroupVersionKind returns the schema.GroupVersionKind of the CRD for // the composite resource this CompositeResourceDefinition defines. -func (in CompositeResourceDefinition) GetCompositeGroupVersionKind() schema.GroupVersionKind { +func (c CompositeResourceDefinition) GetCompositeGroupVersionKind() schema.GroupVersionKind { v := "" - for _, vr := range in.Spec.Versions { + for _, vr := range c.Spec.Versions { if vr.Referenceable { v = vr.Name } } - return schema.GroupVersionKind{Group: in.Spec.Group, Version: v, Kind: in.Spec.Names.Kind} + return schema.GroupVersionKind{Group: c.Spec.Group, Version: v, Kind: c.Spec.Names.Kind} } // OffersClaim is true when a CompositeResourceDefinition offers a claim for the // composite resource it defines. -func (in CompositeResourceDefinition) OffersClaim() bool { - return in.Spec.ClaimNames != nil +func (c CompositeResourceDefinition) OffersClaim() bool { + return c.Spec.ClaimNames != nil } // GetClaimGroupVersionKind returns the schema.GroupVersionKind of the CRD for // the composite resource claim this CompositeResourceDefinition defines. An // empty GroupVersionKind is returned if the CompositeResourceDefinition does // not offer a claim. 
-func (in CompositeResourceDefinition) GetClaimGroupVersionKind() schema.GroupVersionKind { - if !in.OffersClaim() { +func (c CompositeResourceDefinition) GetClaimGroupVersionKind() schema.GroupVersionKind { + if !c.OffersClaim() { return schema.GroupVersionKind{} } v := "" - for _, vr := range in.Spec.Versions { + for _, vr := range c.Spec.Versions { if vr.Referenceable { v = vr.Name } } - return schema.GroupVersionKind{Group: in.Spec.Group, Version: v, Kind: in.Spec.ClaimNames.Kind} + return schema.GroupVersionKind{Group: c.Spec.Group, Version: v, Kind: c.Spec.ClaimNames.Kind} } // GetConnectionSecretKeys returns the set of allowed keys to filter the connection // secret. -func (in *CompositeResourceDefinition) GetConnectionSecretKeys() []string { - return in.Spec.ConnectionSecretKeys +func (c *CompositeResourceDefinition) GetConnectionSecretKeys() []string { + return c.Spec.ConnectionSecretKeys } diff --git a/apis/pkg/meta/v1/interfaces.go b/apis/pkg/meta/v1/interfaces.go index c6ee67aa4..fc578fbbc 100644 --- a/apis/pkg/meta/v1/interfaces.go +++ b/apis/pkg/meta/v1/interfaces.go @@ -41,11 +41,11 @@ func (c *Configuration) GetDependencies() []Dependency { // GetCrossplaneConstraints gets the Provider package's Crossplane version // constraints. -func (c *Provider) GetCrossplaneConstraints() *CrossplaneConstraints { - return c.Spec.MetaSpec.Crossplane +func (p *Provider) GetCrossplaneConstraints() *CrossplaneConstraints { + return p.Spec.MetaSpec.Crossplane } // GetDependencies gets the Provider package's dependencies. -func (c *Provider) GetDependencies() []Dependency { - return c.Spec.MetaSpec.DependsOn +func (p *Provider) GetDependencies() []Dependency { + return p.Spec.MetaSpec.DependsOn } diff --git a/apis/pkg/meta/v1beta1/conversion.go b/apis/pkg/meta/v1beta1/conversion.go index 24fe9b14a..872d93c32 100644 --- a/apis/pkg/meta/v1beta1/conversion.go +++ b/apis/pkg/meta/v1beta1/conversion.go @@ -17,4 +17,4 @@ limitations under the License. package v1beta1 // Hub marks this type as the conversion hub. -func (p *Function) Hub() {} +func (f *Function) Hub() {} diff --git a/cmd/crank/beta/validate/manager.go b/cmd/crank/beta/validate/manager.go index c3b4b081a..8a54127fa 100644 --- a/cmd/crank/beta/validate/manager.go +++ b/cmd/crank/beta/validate/manager.go @@ -21,7 +21,6 @@ import ( "io" "github.com/spf13/afero" - apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -50,7 +49,7 @@ type Manager struct { cache Cache writer io.Writer - crds []*apiextv1.CustomResourceDefinition + crds []*extv1.CustomResourceDefinition deps map[string]bool // One level dependency images confs map[string]bool // Configuration images } @@ -66,7 +65,7 @@ func NewManager(cacheDir string, fs afero.Fs, w io.Writer) *Manager { m.fetcher = &Fetcher{} m.writer = w - m.crds = make([]*apiextv1.CustomResourceDefinition, 0) + m.crds = make([]*extv1.CustomResourceDefinition, 0) m.deps = make(map[string]bool) m.confs = make(map[string]bool) diff --git a/test/e2e/consts.go b/test/e2e/consts.go index c29388ded..e49d91d6f 100644 --- a/test/e2e/consts.go +++ b/test/e2e/consts.go @@ -1,3 +1,20 @@ +/* +Copyright 2022 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package e2e implements end-to-end tests for Crossplane. package e2e // LabelArea represents the 'area' of a feature. For example 'apiextensions', From 15848f49bb5882e160a369a637140966d0e2ba6a Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 19 Feb 2024 08:04:17 -0800 Subject: [PATCH 034/370] Don't enable ireturn linter We do believe in what this linter is linting for, but in practice we only seem to be returning interfaces where we need to. Signed-off-by: Nic Cope --- .golangci.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index 72c865cee..2cf23f35c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -35,7 +35,6 @@ linters: - tagliatelle - paralleltest - nonamedreturns - - ireturn - nilnil - gomnd @@ -94,6 +93,12 @@ linters: # every duplicated code block to be factored out into a function. - dupl + # Warns about returning interfaces rather than concrete types. We do think + # it's best to avoid returning interfaces where possible. However, at the + # time of writing enabling this linter would only catche the (many) cases + # where we must return an interface. + - ireturn + linters-settings: errcheck: # report about not checking of errors in type assetions: `a := b.(MyStruct)`; From 09526c68edc4361b3464527e164b58926ad5736f Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 19 Feb 2024 08:16:48 -0800 Subject: [PATCH 035/370] Enable tagliatelle linter This linter checks that JSON struct tags are Go style camelCase. This is mostly what we want, especially for apis. Signed-off-by: Nic Cope --- .golangci.yml | 6 +++++- apis/apiextensions/v1/xrd_types.go | 2 +- internal/xpkg/upbound/context.go | 8 ++++---- internal/xpkg/upbound/token.go | 2 +- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 2cf23f35c..d814e55dd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -32,7 +32,6 @@ linters: - varnamelen - thelper - testpackage - - tagliatelle - paralleltest - nonamedreturns - nilnil @@ -210,6 +209,11 @@ linters-settings: interfacebloat: max: 5 + + tagliatelle: + case: + rules: + json: goCamel issues: # Excluding configuration per-path and per-linter diff --git a/apis/apiextensions/v1/xrd_types.go b/apis/apiextensions/v1/xrd_types.go index 24b005b5c..2a0415d36 100644 --- a/apis/apiextensions/v1/xrd_types.go +++ b/apis/apiextensions/v1/xrd_types.go @@ -177,7 +177,7 @@ type CompositeResourceValidation struct { // OpenAPIV3Schema is the OpenAPI v3 schema to use for validation and // pruning. // +kubebuilder:pruning:PreserveUnknownFields - OpenAPIV3Schema runtime.RawExtension `json:"openAPIV3Schema,omitempty"` + OpenAPIV3Schema runtime.RawExtension `json:"openAPIV3Schema,omitempty"` //nolint:tagliatelle // False positive. Linter thinks it should be Apiv3, not APIV3. } // CompositeResourceDefinitionStatus shows the observed state of the definition. 
diff --git a/internal/xpkg/upbound/context.go b/internal/xpkg/upbound/context.go index 0d6f545dc..b9fdb1b80 100644 --- a/internal/xpkg/upbound/context.go +++ b/internal/xpkg/upbound/context.go @@ -252,10 +252,10 @@ func (f Flags) MarshalJSON() ([]byte, error) { Domain string `json:"domain,omitempty"` Profile string `json:"profile,omitempty"` Account string `json:"account,omitempty"` - InsecureSkipTLSVerify bool `json:"insecure_skip_tls_verify,omitempty"` - APIEndpoint string `json:"override_api_endpoint,omitempty"` - ProxyEndpoint string `json:"override_proxy_endpoint,omitempty"` - RegistryEndpoint string `json:"override_registry_endpoint,omitempty"` + InsecureSkipTLSVerify bool `json:"insecure_skip_tls_verify,omitempty"` //nolint:tagliatelle // We want snake case in this file. + APIEndpoint string `json:"override_api_endpoint,omitempty"` //nolint:tagliatelle // We want snake case in this file. + ProxyEndpoint string `json:"override_proxy_endpoint,omitempty"` //nolint:tagliatelle // We want snake case in this file. + RegistryEndpoint string `json:"override_registry_endpoint,omitempty"` //nolint:tagliatelle // We want snake case in this file. }{ Domain: nullableURL(f.Domain), Profile: f.Profile, diff --git a/internal/xpkg/upbound/token.go b/internal/xpkg/upbound/token.go index 2237f8f75..525f70ad0 100644 --- a/internal/xpkg/upbound/token.go +++ b/internal/xpkg/upbound/token.go @@ -29,7 +29,7 @@ const errInvalidTokenFile = "token file is invalid" // TokenFile is the format in which Upbound tokens are stored on disk. type TokenFile struct { - AccessID string `json:"accessId"` + AccessID string `json:"accessId"` //nolint:tagliatelle // Should be accessID, but keeping accessId for backward compatibility. Token string `json:"token"` } From 3beb358ed73b0b3917e0aaf2bc0b63a72c4ff403 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 19 Feb 2024 08:27:19 -0800 Subject: [PATCH 036/370] Don't enable the nonamedreturns linter Signed-off-by: Nic Cope --- .golangci.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index d814e55dd..a51b464c6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -33,7 +33,6 @@ linters: - thelper - testpackage - paralleltest - - nonamedreturns - nilnil - gomnd @@ -94,10 +93,18 @@ linters: # Warns about returning interfaces rather than concrete types. We do think # it's best to avoid returning interfaces where possible. However, at the - # time of writing enabling this linter would only catche the (many) cases + # time of writing enabling this linter would only catch the (many) cases # where we must return an interface. - ireturn + # Warns about returning named variables. We do think it's best to avoid + # returning named variables where possible. However, at the time of writing + # enabling this linter would only catch the (many) cases where returning + # named variables is useful to document what the variables are. For example + # we believe it makes sense to return (ready bool) rather than just (bool) + # to communicate what the bool means. 
+ - nonamedreturns + linters-settings: errcheck: # report about not checking of errors in type assetions: `a := b.(MyStruct)`; From 33259552b43814b43ecadc0b3ee11742c6d7a67b Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 19 Feb 2024 08:40:04 -0800 Subject: [PATCH 037/370] Enable thelper linter Signed-off-by: Nic Cope --- .golangci.yml | 1 - cmd/crank/beta/render/render_test.go | 4 ++ .../apiextensions/claim/reconciler_test.go | 2 + .../composite/reconciler_test.go | 2 + internal/xfn/function_runner_test.go | 4 ++ test/e2e/funcs/collect.go | 6 +++ test/e2e/funcs/env.go | 2 + test/e2e/funcs/feature.go | 48 +++++++++++++++++++ test/e2e/main_test.go | 3 ++ 9 files changed, 71 insertions(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index a51b464c6..3c13604f9 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -30,7 +30,6 @@ linters: # make existing code compliant. - wrapcheck - varnamelen - - thelper - testpackage - paralleltest - nilnil diff --git a/cmd/crank/beta/render/render_test.go b/cmd/crank/beta/render/render_test.go index ecd695e00..e6e57592b 100644 --- a/cmd/crank/beta/render/render_test.go +++ b/cmd/crank/beta/render/render_test.go @@ -753,6 +753,8 @@ func TestRender(t *testing.T) { } func NewFunction(t *testing.T, rsp *fnv1beta1.RunFunctionResponse) net.Listener { + t.Helper() + lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatal(err) @@ -766,6 +768,8 @@ func NewFunction(t *testing.T, rsp *fnv1beta1.RunFunctionResponse) net.Listener } func NewFunctionWithRunFunc(t *testing.T, runFunc func(context.Context, *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error)) net.Listener { + t.Helper() + lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatal(err) diff --git a/internal/controller/apiextensions/claim/reconciler_test.go b/internal/controller/apiextensions/claim/reconciler_test.go index e7cfa1aab..b27d52eca 100644 --- a/internal/controller/apiextensions/claim/reconciler_test.go +++ b/internal/controller/apiextensions/claim/reconciler_test.go @@ -594,7 +594,9 @@ func NewClaim(m ...ClaimModifier) *claim.Unstructured { // A status update function that ensures the supplied object is the claim we want. func WantClaim(t *testing.T, want *claim.Unstructured) func(_ context.Context, obj client.Object, _ ...client.SubResourceUpdateOption) error { + t.Helper() return func(_ context.Context, got client.Object, _ ...client.SubResourceUpdateOption) error { + t.Helper() // Normally we use a custom Equal method on conditions to ignore the // lastTransitionTime, but we're using unstructured types here where // the conditions are just a map[string]any. diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index 3e7dc87f7..dda3711d5 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ b/internal/controller/apiextensions/composite/reconciler_test.go @@ -760,7 +760,9 @@ func WithComposite(_ *testing.T, cr *composite.Unstructured) func(_ context.Cont // A status update function that ensures the supplied object is the XR we want. 
func WantComposite(t *testing.T, want resource.Composite) func(_ context.Context, obj client.Object, _ ...client.SubResourceUpdateOption) error { + t.Helper() return func(_ context.Context, got client.Object, _ ...client.SubResourceUpdateOption) error { + t.Helper() // Normally we use a custom Equal method on conditions to ignore the // lastTransitionTime, but we may be using unstructured types here where // the conditions are just a map[string]any. diff --git a/internal/xfn/function_runner_test.go b/internal/xfn/function_runner_test.go index 6b960418e..a2a417388 100644 --- a/internal/xfn/function_runner_test.go +++ b/internal/xfn/function_runner_test.go @@ -211,6 +211,8 @@ func TestRunFunction(t *testing.T) { } func TestGetClientConn(t *testing.T) { + t.Helper() + // TestRunFunction exercises most of the getClientConn code. Here we just // test some cases that don't fit well in our usual table-driven format. @@ -372,6 +374,8 @@ func NewListFn(target string) test.MockListFn { } func NewGRPCServer(t *testing.T, ss v1beta1.FunctionRunnerServiceServer) net.Listener { + t.Helper() + // Listen on a random port. lis, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { diff --git a/test/e2e/funcs/collect.go b/test/e2e/funcs/collect.go index 294af6d41..0bf265b15 100644 --- a/test/e2e/funcs/collect.go +++ b/test/e2e/funcs/collect.go @@ -52,6 +52,8 @@ type coordinate struct { // Note: this is a pretty expensive operation only suited for e2e tests with // small clusters. func buildRelatedObjectGraph(ctx context.Context, t *testing.T, discoveryClient discovery.DiscoveryInterface, client dynamic.Interface, mapper meta.RESTMapper) (map[coordinate][]coordinate, error) { + t.Helper() + // Discover all resource types resourceLists, err := discoveryClient.ServerPreferredResources() if err != nil { @@ -142,6 +144,8 @@ func parseAPIVersion(apiVersion string) (group, version string) { // ownership, i.e. the returned objects are transitively owned by obj, or // resource reference. func RelatedObjects(ctx context.Context, t *testing.T, config *rest.Config, objs ...client.Object) ([]client.Object, error) { + t.Helper() + dynClient, err := dynamic.NewForConfig(config) if err != nil { return nil, err @@ -185,6 +189,8 @@ func RelatedObjects(ctx context.Context, t *testing.T, config *rest.Config, objs } func loadCoordinates(ctx context.Context, t *testing.T, dynClient dynamic.Interface, coords []coordinate) []client.Object { + t.Helper() + ret := make([]client.Object, 0, len(coords)) for _, coord := range coords { other, err := dynClient.Resource(coord.GroupVersionResource).Namespace(coord.Namespace).Get(ctx, coord.Name, metav1.GetOptions{}) diff --git a/test/e2e/funcs/env.go b/test/e2e/funcs/env.go index 7f620251d..82b9e9f52 100644 --- a/test/e2e/funcs/env.go +++ b/test/e2e/funcs/env.go @@ -70,6 +70,8 @@ func HelmUpgrade(o ...helm.Option) env.Func { // returns an error the calling test is failed with t.Fatal(err). func AsFeaturesFunc(fn env.Func) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + ctx, err := fn(ctx, c) if err != nil { t.Fatal(err) diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go index d7ac1b024..be633baf3 100644 --- a/test/e2e/funcs/feature.go +++ b/test/e2e/funcs/feature.go @@ -69,6 +69,8 @@ type onSuccessHandler func(o k8s.Object) // AllOf runs the supplied functions in order. 
func AllOf(fns ...features.Func) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + for _, fn := range fns { ctx = fn(ctx, t, c) } @@ -79,6 +81,8 @@ func AllOf(fns ...features.Func) features.Func { // InBackground runs the supplied function in a goroutine. func InBackground(fn features.Func) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + go fn(ctx, t, c) return ctx } @@ -109,6 +113,8 @@ func ReadyToTestWithin(d time.Duration, namespace string) features.Func { // not Available within the supplied duration. func DeploymentBecomesAvailableWithin(d time.Duration, namespace, name string) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dp := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}} t.Logf("Waiting %s for deployment %s/%s to become Available...", d, dp.GetNamespace(), dp.GetName()) start := time.Now() @@ -125,6 +131,8 @@ func DeploymentBecomesAvailableWithin(d time.Duration, namespace, name string) f // to exist within the supplied duration. func ResourcesCreatedWithin(d time.Duration, dir, pattern string) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) if err != nil { t.Error(err) @@ -153,6 +161,8 @@ func ResourcesCreatedWithin(d time.Duration, dir, pattern string) features.Func // exist within the supplied duration. func ResourceCreatedWithin(d time.Duration, o k8s.Object) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + t.Logf("Waiting %s for %s to be created...", d, identifier(o)) start := time.Now() @@ -170,6 +180,8 @@ func ResourceCreatedWithin(d time.Duration, o k8s.Object) features.Func { // within the supplied duration. func ResourcesDeletedWithin(d time.Duration, dir, pattern string) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) if err != nil { t.Error(err) @@ -202,6 +214,8 @@ func ResourcesDeletedWithin(d time.Duration, dir, pattern string) features.Func // within the supplied duration. func ResourceDeletedWithin(d time.Duration, o k8s.Object) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + t.Logf("Waiting %s for %s to be deleted...", d, identifier(o)) start := time.Now() @@ -220,6 +234,8 @@ func ResourceDeletedWithin(d time.Duration, o k8s.Object) features.Func { // Comparison of conditions is modulo messages. func ResourcesHaveConditionWithin(d time.Duration, dir, pattern string, cds ...xpv1.Condition) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) if err != nil { t.Error(err) @@ -313,6 +329,8 @@ var NotFound = notFound{} //nolint:gochecknoglobals // We treat this as a consta // duration. The supplied 'want' value must cmp.Equal the actual value. 
func ResourcesHaveFieldValueWithin(d time.Duration, dir, pattern, path string, want any) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) if err != nil { t.Error(err) @@ -369,6 +387,8 @@ func ResourcesHaveFieldValueWithin(d time.Duration, dir, pattern, path string, w // duration. The supplied 'want' value must cmp.Equal the actual value. func ResourceHasFieldValueWithin(d time.Duration, o k8s.Object, path string, want any) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + t.Logf("Waiting %s for %s to have value %q at field path %s...", d, identifier(o), want, path) match := func(o k8s.Object) bool { @@ -408,6 +428,8 @@ func ResourceHasFieldValueWithin(d time.Duration, o k8s.Object, path string, wan // resource cannot be applied successfully. func ApplyResources(manager, dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dfs := os.DirFS(dir) files, _ := fs.Glob(dfs, pattern) @@ -432,6 +454,8 @@ type claimCtxKey struct{} // and stores it in the test context for later retrival if needed. func ApplyClaim(manager, dir, cm string) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dfs := os.DirFS(dir) files, _ := fs.Glob(dfs, cm) @@ -484,6 +508,8 @@ func SetAnnotationMutateOption(key, value string) decoder.DecodeOption { // server should reject a resource. func ResourcesFailToApply(manager, dir, pattern string) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dfs := os.DirFS(dir) if err := decoder.DecodeEachFile(ctx, dfs, pattern, ApplyHandler(c.Client().Resources(), manager)); err == nil { @@ -524,6 +550,8 @@ func ApplyHandler(r *resources.Resources, manager string, osh ...onSuccessHandle // (e.g. *.yaml). func DeleteResources(dir, pattern string) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dfs := os.DirFS(dir) if err := decoder.DecodeEachFile(ctx, dfs, pattern, decoder.DeleteHandler(c.Client().Resources())); err != nil { @@ -541,6 +569,8 @@ func DeleteResources(dir, pattern string) features.Func { // the test context does not change within the given time. func ClaimUnderTestMustNotChangeWithin(d time.Duration) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + cm, ok := ctx.Value(claimCtxKey{}).(*claim.Unstructured) if !ok { t.Fatalf("claim not available in the context") @@ -574,6 +604,8 @@ func ClaimUnderTestMustNotChangeWithin(d time.Duration) features.Func { // the test context does not change within the given time. func CompositeUnderTestMustNotChangeWithin(d time.Duration) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + cm, ok := ctx.Value(claimCtxKey{}).(*claim.Unstructured) if !ok { t.Fatalf("claim not available in the context") @@ -619,6 +651,8 @@ func CompositeUnderTestMustNotChangeWithin(d time.Duration) features.Func { // must be matched by the given function within the given timeout. 
func CompositeResourceMustMatchWithin(d time.Duration, dir, claimFile string, match func(xr *composite.Unstructured) bool) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + cm := &claim.Unstructured{} if err := decoder.DecodeFile(os.DirFS(dir), claimFile, cm); err != nil { @@ -669,6 +703,8 @@ func CompositeResourceMustMatchWithin(d time.Duration, dir, claimFile string, ma // the specified time. func CompositeResourceHasFieldValueWithin(d time.Duration, dir, claimFile, path string, want any) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + cm := &claim.Unstructured{} if err := decoder.DecodeFile(os.DirFS(dir), claimFile, cm); err != nil { @@ -738,6 +774,8 @@ func CompositeResourceHasFieldValueWithin(d time.Duration, dir, claimFile, path // supplied path within the supplied duration. func ComposedResourcesHaveFieldValueWithin(d time.Duration, dir, file, path string, want any, filter func(object k8s.Object) bool) features.Func { //nolint:gocognit // Not too much over. return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + cm := &claim.Unstructured{} if err := decoder.DecodeFile(os.DirFS(dir), file, cm); err != nil { t.Error(err) @@ -827,6 +865,8 @@ func ComposedResourcesHaveFieldValueWithin(d time.Duration, dir, file, path stri // validation function within the supplied duration. func ListedResourcesValidatedWithin(d time.Duration, list k8s.ObjectList, min int, validate func(object k8s.Object) bool, listOptions ...resources.ListOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + if err := wait.For(conditions.New(c.Client().Resources()).ResourceListMatchN(list, min, validate, listOptions...), wait.WithTimeout(d), wait.WithInterval(DefaultPollInterval)); err != nil { y, _ := yaml.Marshal(list) t.Errorf("resources didn't pass validation: %v:\n\n%s\n\n", err, y) @@ -842,6 +882,8 @@ func ListedResourcesValidatedWithin(d time.Duration, list k8s.ObjectList, min in // is not deleted within the supplied duration. func ListedResourcesDeletedWithin(d time.Duration, list k8s.ObjectList, listOptions ...resources.ListOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + if err := c.Client().Resources().List(ctx, list, listOptions...); err != nil { return ctx } @@ -861,6 +903,8 @@ func ListedResourcesDeletedWithin(d time.Duration, list k8s.ObjectList, listOpti // not modified within the supplied duration. func ListedResourcesModifiedWith(list k8s.ObjectList, min int, modify func(object k8s.Object), listOptions ...resources.ListOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + if err := c.Client().Resources().List(ctx, list, listOptions...); err != nil { return ctx } @@ -896,6 +940,8 @@ func ListedResourcesModifiedWith(list k8s.ObjectList, min int, modify func(objec // and changed conditions. 
func LogResources(list k8s.ObjectList, listOptions ...resources.ListOption) features.Func { //nolint:gocognit // this is a test helper return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + prev := map[string]map[xpv1.ConditionType]xpv1.Condition{} pollCtx, cancel := context.WithCancel(ctx) @@ -964,6 +1010,8 @@ func LogResources(list k8s.ObjectList, listOptions ...resources.ListOption) feat // webhook. func DeletionBlockedByUsageWebhook(dir, pattern string) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dfs := os.DirFS(dir) err := decoder.DecodeEachFile(ctx, dfs, pattern, decoder.DeleteHandler(c.Client().Resources())) diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go index 14fd5a4b6..605d3c58c 100644 --- a/test/e2e/main_test.go +++ b/test/e2e/main_test.go @@ -145,7 +145,10 @@ func TestMain(m *testing.M) { } // Check that all features are specifying a suite they belong to via LabelTestSuite. + //nolint:thelper // We can't make testing.T the second argument because we want to satisfy types.FeatureEnvFunc. environment.BeforeEachFeature(func(ctx context.Context, _ *envconf.Config, t *testing.T, feature features.Feature) (context.Context, error) { + t.Helper() + if _, exists := feature.Labels()[config.LabelTestSuite]; !exists { t.Fatalf("Feature %q does not have the required %q label set", feature.Name(), config.LabelTestSuite) } From 75c0a364d370ff781275ba8c64cc8447f89e97e3 Mon Sep 17 00:00:00 2001 From: Bob Haddleton Date: Fri, 23 Feb 2024 13:24:25 -0600 Subject: [PATCH 038/370] Update status to Deleting while waiting for foreground deletion Signed-off-by: Bob Haddleton --- .../controller/apiextensions/claim/reconciler.go | 2 +- .../apiextensions/claim/reconciler_test.go | 13 ++++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/internal/controller/apiextensions/claim/reconciler.go b/internal/controller/apiextensions/claim/reconciler.go index f1ffea553..a6c0c485f 100644 --- a/internal/controller/apiextensions/claim/reconciler.go +++ b/internal/controller/apiextensions/claim/reconciler.go @@ -411,7 +411,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } if meta.WasDeleted(xr) && requiresForegroundDeletion { log.Debug("Waiting for the XR to finish deleting (foreground deletion)") - return reconcile.Result{Requeue: true}, nil + return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, cm), errUpdateClaimStatus) } do := &client.DeleteOptions{} if requiresForegroundDeletion { diff --git a/internal/controller/apiextensions/claim/reconciler_test.go b/internal/controller/apiextensions/claim/reconciler_test.go index 904ff2604..e7cfa1aab 100644 --- a/internal/controller/apiextensions/claim/reconciler_test.go +++ b/internal/controller/apiextensions/claim/reconciler_test.go @@ -383,7 +383,18 @@ func TestReconcile(t *testing.T) { } return nil }), - }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + // We want to foreground delete. + fg := xpv1.CompositeDeleteForeground + cm.SetCompositeDeletePolicy(&fg) + + // Check that we set our status condition. 
+ cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + })), + }, + ), WithClaimFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), From eb9fe61639ccf20a109849660229caeb1183dae8 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Fri, 23 Feb 2024 08:55:48 +0100 Subject: [PATCH 039/370] Wire klog with a filter to show only client- or server-side throttling Signed-off-by: Dr. Stefan Schimanski --- cmd/crossplane/main.go | 4 ++++ go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/cmd/crossplane/main.go b/cmd/crossplane/main.go index 6eab7c47f..0e0b71daf 100644 --- a/cmd/crossplane/main.go +++ b/cmd/crossplane/main.go @@ -68,6 +68,7 @@ func (d debugFlag) BeforeApply(ctx *kong.Context) error { //nolint:unparam // Be // *very* verbose even at info level, so we only provide it a real // logger when we're running in debug mode. ctrl.SetLogger(zl) + logging.SetFilteredKlogLogger(zl) return nil } @@ -79,11 +80,14 @@ func (v versionFlag) BeforeApply(app *kong.Kong) error { //nolint:unparam // Bef func main() { zl := zap.New().WithName("crossplane") + logging.SetFilteredKlogLogger(zl) + // Setting the controller-runtime logger to a no-op logger by default, // unless debug mode is enabled. This is because the controller-runtime // logger is *very* verbose even at info level. This is not really needed, // but otherwise we get a warning from the controller-runtime. ctrl.SetLogger(zap.New(zap.WriteTo(io.Discard))) + // Note that the controller managers scheme must be a superset of the // package manager's object scheme; it must contain all object types that // may appear in a Crossplane package. This is because the package manager diff --git a/go.mod b/go.mod index 8e0caef7e..f62cc02a5 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Masterminds/semver v1.5.0 github.com/alecthomas/kong v0.8.1 github.com/bufbuild/buf v1.27.2 - github.com/crossplane/crossplane-runtime v1.16.0-rc.1 + github.com/crossplane/crossplane-runtime v1.16.0-rc.1.0.20240226223305-2c81cc6326e5 github.com/docker/docker v25.0.2+incompatible github.com/docker/go-connections v0.5.0 github.com/emicklei/dot v1.6.1 diff --git a/go.sum b/go.sum index f3bdb7158..fba4a06b2 100644 --- a/go.sum +++ b/go.sum @@ -134,8 +134,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crossplane/crossplane-runtime v1.16.0-rc.1 h1:wBvfaEgDdYa47qovPWYc80IGTBw17B3zw3yf2Q2NNkQ= -github.com/crossplane/crossplane-runtime v1.16.0-rc.1/go.mod h1:kRcJjJQmBFrR2n/KhwL8wYS7xNfq3D8eK4JliEScOHI= +github.com/crossplane/crossplane-runtime v1.16.0-rc.1.0.20240226223305-2c81cc6326e5 h1:Jiqj9j43gUX/goitNa86/ociah8G74C3pIGwIPSZsks= +github.com/crossplane/crossplane-runtime v1.16.0-rc.1.0.20240226223305-2c81cc6326e5/go.mod h1:rG/KJwyA4iGMCubZ1EXs39Ow7XvOcWEfb1u3jkNekfw= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= From 09af8646b60e21a3d5d3a7ab5a50960c65c451b8 Mon Sep 17 00:00:00 2001 From: Gualter Barbas 
Baptista Date: Fri, 9 Feb 2024 22:52:02 +0100 Subject: [PATCH 040/370] Update ADOPTERS.md with Deutsche Bahn Signed-off-by: Gualter Barbas Baptista --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index e18bf5b10..92702cb27 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -76,3 +76,4 @@ This list is sorted in the order that organizations were added to it. | [babelforce](https://www.babelforce.com/) | [@nik843](https://github.com/nik843) | Orchestrating relational database servers by creating databases, users and permissions for them within all environments. | | [Nike](https://nike.com/) | [joel.cooklin@nike.com](mailto:joel.cooklin@nike.com) | Crossplane powers the internal developer platform managing thousands of resources from development to production. | | [Elastic](https://elastic.co) | [@hwoarang](https://github.com/hwoarang) | We use Crossplane to deploy resources across multiple Cloud Providers for the [Elastic Serverless](https://www.elastic.co/elasticsearch/serverless) products. | +| [DB Systel](https://www.dbsystel.de) | [@gandhiano](htttps://github.com/gandhiano) | Backbone of the Developer Experience Platform of the [Deutsche Bahn](https://deutschebahn.com). Through Crossplane, application developers can easily provision and integrate a panoply of platform services, creating a coherent platform experience. Cloud infrastructure can also be self-serviced, allowing a 100% Gitops infrastructure-as-code approach. Both the K8s API and a developer portal UI ([Backstage](https://backstage.io)) can be used to interact with the Crossplane compositions.| From 947c76f0f8fee1651ed73265b17927ceb992649c Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Tue, 27 Feb 2024 14:55:19 +0100 Subject: [PATCH 041/370] apiextensions/definition: don't attempt to start multiple composite controllers Signed-off-by: Dr. 
Stefan Schimanski --- .../apiextensions/definition/reconciler.go | 13 ++- .../definition/reconciler_test.go | 80 +++++++++++++++++-- 2 files changed, 86 insertions(+), 7 deletions(-) diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 4ae28275b..f953a77fc 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -446,7 +446,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco observed := d.Status.Controllers.CompositeResourceTypeRef desired := v1.TypeReferenceTo(d.GetCompositeGroupVersionKind()) - if observed.APIVersion != "" && observed != desired { + switch { + case observed.APIVersion != "" && observed != desired: r.composite.Stop(composite.ControllerName(d.GetName())) if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { r.xrInformers.UnregisterComposite(d.GetCompositeGroupVersionKind()) @@ -454,6 +455,16 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco log.Debug("Referenceable version changed; stopped composite resource controller", "observed-version", observed.APIVersion, "desired-version", desired.APIVersion) + case r.composite.IsRunning(composite.ControllerName(d.GetName())): + log.Debug("Composite resource controller is running") + d.Status.SetConditions(v1.WatchingComposite()) + return reconcile.Result{Requeue: false}, errors.Wrap(r.client.Status().Update(ctx, d), errUpdateStatus) + default: + if err := r.composite.Err(composite.ControllerName(d.GetName())); err != nil { + log.Debug("Composite resource controller encountered an error. Going to restart it", "error", err) + } else { + log.Debug("Composite resource controller is not running. 
Going to start it") + } } ro := CompositeReconcilerOptions(r.options, d, r.client, r.log, r.record) diff --git a/internal/controller/apiextensions/definition/reconciler_test.go b/internal/controller/apiextensions/definition/reconciler_test.go index b78348cc7..78a5ef4f7 100644 --- a/internal/controller/apiextensions/definition/reconciler_test.go +++ b/internal/controller/apiextensions/definition/reconciler_test.go @@ -51,10 +51,15 @@ import ( type MockEngine struct { ControllerEngine - MockCreate func(name string, o kcontroller.Options, w ...controller.Watch) (controller.NamedController, error) - MockStart func(name string, o kcontroller.Options, w ...controller.Watch) error - MockStop func(name string) - MockErr func(name string) error + MockIsRunning func(name string) bool + MockCreate func(name string, o kcontroller.Options, w ...controller.Watch) (controller.NamedController, error) + MockStart func(name string, o kcontroller.Options, w ...controller.Watch) error + MockStop func(name string) + MockErr func(name string) error +} + +func (m *MockEngine) IsRunning(name string) bool { + return m.MockIsRunning(name) } func (m *MockEngine) Create(name string, o kcontroller.Options, w ...controller.Watch) (controller.NamedController, error) { @@ -567,7 +572,8 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return nil }, + MockIsRunning: func(_ string) bool { return false }, + MockErr: func(_ string) error { return nil }, MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { return nil, errBoom }, @@ -627,7 +633,8 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return errBoom }, // This error should only be logged. + MockIsRunning: func(_ string) bool { return false }, + MockErr: func(_ string) error { return errBoom }, // This error should only be logged. 
MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { return mockNamedController{ MockStart: func(_ context.Context) error { return nil }, @@ -735,6 +742,67 @@ func TestReconcile(t *testing.T) { r: reconcile.Result{Requeue: false}, }, }, + "NotRestartingWithoutVersionChange": { + reason: "We should return without requeueing if we successfully ensured our CRD exists and controller is started.", + args: args{ + mgr: &mockManager{ + GetCacheFn: func() cache.Cache { + return &mockCache{ + ListFn: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, + } + }, + GetClientFn: func() client.Client { + return &test.MockClient{MockList: test.NewMockListFn(nil)} + }, + GetSchemeFn: runtime.NewScheme, + GetRESTMapperFn: func() meta.RESTMapper { + return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) + }, + }, + opts: []ReconcilerOption{ + WithClientApplicator(resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Status.SetConditions(v1.WatchingComposite()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } + return nil + }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), + }), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{ + Status: extv1.CustomResourceDefinitionStatus{ + Conditions: []extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, nil + })), + WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { + return nil + }}), + WithControllerEngine(&MockEngine{ + MockIsRunning: func(_ string) bool { return true }, + MockErr: func(_ string) error { return errBoom }, // This error should only be logged. + MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { + t.Errorf("MockCreate should not be called") + return nil, nil + }, + }), + }, + }, + want: want{ + r: reconcile.Result{Requeue: false}, + }, + }, } // Run every test with and without the realtime compositions feature. From 1c949ed056ef03652a273c1cdf5316446688015c Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Fri, 23 Feb 2024 10:36:39 +0100 Subject: [PATCH 042/370] apiextensions/definition: unregister composite gvk from composedResourceInformers everywhere Signed-off-by: Dr. Stefan Schimanski --- .../apiextensions/definition/reconciler.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index f953a77fc..6ec379344 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -340,7 +340,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // controller on a previous reconcile, but we try again // just in case. This is a no-op if the controller was // already stopped. 
- r.composite.Stop(composite.ControllerName(d.GetName())) + r.stopCompositeController(d) log.Debug("Stopped composite resource controller") if err := r.composite.RemoveFinalizer(ctx, d); err != nil { @@ -392,7 +392,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // The controller should be stopped before the deletion of CRD // so that it doesn't crash. - r.composite.Stop(composite.ControllerName(d.GetName())) + r.stopCompositeController(d) log.Debug("Stopped composite resource controller") if err := r.client.Delete(ctx, crd); resource.IgnoreNotFound(err) != nil { @@ -448,10 +448,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco desired := v1.TypeReferenceTo(d.GetCompositeGroupVersionKind()) switch { case observed.APIVersion != "" && observed != desired: - r.composite.Stop(composite.ControllerName(d.GetName())) - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - r.xrInformers.UnregisterComposite(d.GetCompositeGroupVersionKind()) - } + r.stopCompositeController(d) log.Debug("Referenceable version changed; stopped composite resource controller", "observed-version", observed.APIVersion, "desired-version", desired.APIVersion) @@ -536,6 +533,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: false}, errors.Wrap(r.client.Status().Update(ctx, d), errUpdateStatus) } +func (r *Reconciler) stopCompositeController(d *v1.CompositeResourceDefinition) { + r.composite.Stop(composite.ControllerName(d.GetName())) + if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { + r.xrInformers.UnregisterComposite(d.GetCompositeGroupVersionKind()) + } +} + // CompositeReconcilerOptions builds the options for a composite resource // reconciler. The options vary based on the supplied feature flags. func CompositeReconcilerOptions(co apiextensionscontroller.Options, d *v1.CompositeResourceDefinition, c client.Client, l logging.Logger, e event.Recorder) []composite.ReconcilerOption { From b3a366f2036abade23e5af30cb2e3e9f4934c3e3 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Wed, 14 Feb 2024 16:35:27 +0300 Subject: [PATCH 043/370] Ability to replay deletion with Usage spec Signed-off-by: Hasan Turken (cherry picked from commit b0dad402cbb5f939e068d3fa49c6a2260c1ed57d) --- apis/apiextensions/v1alpha1/usage_types.go | 3 ++ .../v1alpha1/zz_generated.deepcopy.go | 5 ++ .../apiextensions.crossplane.io_usages.yaml | 5 ++ .../apiextensions/usage/reconciler.go | 14 +++++ .../apiextensions/usage/reconciler_test.go | 54 +++++++++++++++++++ internal/usage/handler.go | 27 ++++++++-- internal/usage/handler_test.go | 23 +++++--- 7 files changed, 120 insertions(+), 11 deletions(-) diff --git a/apis/apiextensions/v1alpha1/usage_types.go b/apis/apiextensions/v1alpha1/usage_types.go index f590b4a10..1cb1ee0e8 100644 --- a/apis/apiextensions/v1alpha1/usage_types.go +++ b/apis/apiextensions/v1alpha1/usage_types.go @@ -68,6 +68,9 @@ type UsageSpec struct { // Reason is the reason for blocking deletion of the resource. // +optional Reason *string `json:"reason,omitempty"` + // ReplayDeletion will trigger a deletion on the used resource during the deletion of the usage itself, if it was attempted to be deleted at least once. + // +optional + ReplayDeletion *bool `json:"replayDeletion,omitempty"` } // UsageStatus defines the observed state of Usage. 
diff --git a/apis/apiextensions/v1alpha1/zz_generated.deepcopy.go b/apis/apiextensions/v1alpha1/zz_generated.deepcopy.go index c53d5a361..45148977d 100644 --- a/apis/apiextensions/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apiextensions/v1alpha1/zz_generated.deepcopy.go @@ -229,6 +229,11 @@ func (in *UsageSpec) DeepCopyInto(out *UsageSpec) { *out = new(string) **out = **in } + if in.ReplayDeletion != nil { + in, out := &in.ReplayDeletion, &out.ReplayDeletion + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsageSpec. diff --git a/cluster/crds/apiextensions.crossplane.io_usages.yaml b/cluster/crds/apiextensions.crossplane.io_usages.yaml index 0cd30d52f..e05a9b444 100644 --- a/cluster/crds/apiextensions.crossplane.io_usages.yaml +++ b/cluster/crds/apiextensions.crossplane.io_usages.yaml @@ -138,6 +138,11 @@ spec: reason: description: Reason is the reason for blocking deletion of the resource. type: string + replayDeletion: + description: ReplayDeletion will trigger a deletion on the used resource + during the deletion of the usage itself, if it was attempted to + be deleted at least once. + type: boolean required: - of type: object diff --git a/internal/controller/apiextensions/usage/reconciler.go b/internal/controller/apiextensions/usage/reconciler.go index 3de23a37b..b89f90cdc 100644 --- a/internal/controller/apiextensions/usage/reconciler.go +++ b/internal/controller/apiextensions/usage/reconciler.go @@ -86,6 +86,7 @@ const ( reasonRemoveInUseLabel event.Reason = "RemoveInUseLabel" reasonAddFinalizer event.Reason = "AddFinalizer" reasonRemoveFinalizer event.Reason = "RemoveFinalizer" + reasonReplayDeletion event.Reason = "ReplayDeletion" reasonUsageConfigured event.Reason = "UsageConfigured" reasonWaitUsing event.Reason = "WaitingUsingDeleted" @@ -316,6 +317,19 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } } + if u.Spec.ReplayDeletion != nil && *u.Spec.ReplayDeletion && used.GetAnnotations() != nil && used.GetAnnotations()[usage.AnnotationKeyFirstDeletionAttempt] != "" { + // We have already recorded a deletion attempt and want to replay deletion, let's delete the used resource. + log.Debug("Replaying deletion of the used resource", "apiVersion", used.GetAPIVersion(), "kind", used.GetKind(), "name", used.GetName()) + go func() { + // We do the deletion async and after some delay to make sure the usage is deleted before the + // deletion attempt. We remove the finalizer on this Usage right below, so, we know it will disappear + // very soon. + time.Sleep(2 * time.Second) + // We cannot use the context from the reconcile function since it will be cancelled after the reconciliation. 
+ _ = r.client.Delete(context.Background(), used) + }() + } + // Remove the finalizer from the usage if err = r.usage.RemoveFinalizer(ctx, u); err != nil { log.Debug(errRemoveFinalizer, "error", err) diff --git a/internal/controller/apiextensions/usage/reconciler_test.go b/internal/controller/apiextensions/usage/reconciler_test.go index 2b076325d..0a1aa78ef 100644 --- a/internal/controller/apiextensions/usage/reconciler_test.go +++ b/internal/controller/apiextensions/usage/reconciler_test.go @@ -26,6 +26,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -38,6 +39,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" "github.com/crossplane/crossplane/apis/apiextensions/v1alpha1" + "github.com/crossplane/crossplane/internal/usage" "github.com/crossplane/crossplane/internal/xcrd" ) @@ -723,6 +725,58 @@ func TestReconcile(t *testing.T) { r: reconcile.Result{}, }, }, + "SuccessfulDeleteWithReplayDeletion": { + reason: "We should replay deletion after usage is gone and replayDeletion is true.", + args: args{ + mgr: &fake.Manager{}, + opts: []ReconcilerOption{ + WithClientApplicator(xpresource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + if o, ok := obj.(*v1alpha1.Usage); ok { + o.SetDeletionTimestamp(&now) + o.Spec.ReplayDeletion = ptr.To(true) + o.Spec.Of.ResourceRef = &v1alpha1.ResourceRef{Name: "cool"} + return nil + } + if o, ok := obj.(*composed.Unstructured); ok { + o.SetAnnotations(map[string]string{usage.AnnotationKeyFirstDeletionAttempt: time.Now().String()}) + o.SetLabels(map[string]string{inUseLabelKey: "true"}) + return nil + } + return errors.New("unexpected object type") + }), + MockList: test.NewMockListFn(nil, func(_ client.ObjectList) error { + return nil + }), + MockUpdate: test.NewMockUpdateFn(nil, func(obj client.Object) error { + if o, ok := obj.(*composed.Unstructured); ok { + if o.GetLabels()[inUseLabelKey] != "" { + t.Errorf("expected in use label to be removed") + } + return nil + } + return errors.New("unexpected object type") + }), + MockDelete: func(_ context.Context, _ client.Object, _ ...client.DeleteOption) error { + return nil + }, + }, + }), + WithSelectorResolver(fakeSelectorResolver{ + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { + return nil + }, + }), + WithFinalizer(xpresource.FinalizerFns{RemoveFinalizerFn: func(_ context.Context, _ xpresource.Object) error { + return nil + }}), + }, + }, + want: want{ + r: reconcile.Result{}, + }, + }, "SuccessfulWaitWhenUsingStillThere": { reason: "We should wait until the using resource is deleted.", args: args{ diff --git a/internal/usage/handler.go b/internal/usage/handler.go index 2865c4621..8ef0fb4c9 100644 --- a/internal/usage/handler.go +++ b/internal/usage/handler.go @@ -21,6 +21,7 @@ import ( "context" "fmt" "net/http" + "time" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,6 +35,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/controller" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" + xpmeta "github.com/crossplane/crossplane-runtime/pkg/meta" xpunstructured "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" 
"github.com/crossplane/crossplane/apis/apiextensions/v1alpha1" @@ -44,6 +46,10 @@ const ( // indexing and retrieving needed CRDs InUseIndexKey = "inuse.apiversion.kind.name" + // AnnotationKeyFirstDeletionAttempt is the annotation key used to record the timestamp for first deletion attempt + // which was blocked due to usage. + AnnotationKeyFirstDeletionAttempt = "usage.crossplane.io/first-deletion-attempt" + // Error strings. errFmtUnexpectedOp = "unexpected operation %q, expected \"DELETE\"" ) @@ -87,7 +93,7 @@ func SetupWebhookWithManager(mgr ctrl.Manager, options controller.Options) error // Handler implements the admission Handler for Composition. type Handler struct { - reader client.Reader + client client.Client log logging.Logger } @@ -102,9 +108,9 @@ func WithLogger(l logging.Logger) HandlerOption { } // NewHandler returns a new Handler. -func NewHandler(reader client.Reader, opts ...HandlerOption) *Handler { +func NewHandler(client client.Client, opts ...HandlerOption) *Handler { h := &Handler{ - reader: reader, + client: client, log: logging.NewNopLogger(), } @@ -135,13 +141,26 @@ func (h *Handler) Handle(ctx context.Context, request admission.Request) admissi func (h *Handler) validateNoUsages(ctx context.Context, u *unstructured.Unstructured) admission.Response { h.log.Debug("Validating no usages", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName()) usageList := &v1alpha1.UsageList{} - if err := h.reader.List(ctx, usageList, client.MatchingFields{InUseIndexKey: IndexValueForObject(u)}); err != nil { + if err := h.client.List(ctx, usageList, client.MatchingFields{InUseIndexKey: IndexValueForObject(u)}); err != nil { h.log.Debug("Error when getting Usages", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName(), "err", err) return admission.Errored(http.StatusInternalServerError, err) } if len(usageList.Items) > 0 { msg := inUseMessage(usageList) h.log.Debug("Usage found, deletion not allowed", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName(), "msg", msg) + + // If the resource is being deleted, we want to record the first deletion attempt + // so that we can track whether a deletion was attempted at least once. + if u.GetAnnotations() == nil || u.GetAnnotations()[AnnotationKeyFirstDeletionAttempt] == "" { + orig := u.DeepCopy() + xpmeta.AddAnnotations(u, map[string]string{AnnotationKeyFirstDeletionAttempt: metav1.Now().Format(time.RFC3339)}) + // Patch the resource to add the deletion attempt annotation + if err := h.client.Patch(ctx, u, client.MergeFrom(orig)); err != nil { + h.log.Debug("Error when patching the resource to add the deletion attempt annotation", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName(), "err", err) + return admission.Errored(http.StatusInternalServerError, err) + } + } + return admission.Response{ AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: false, diff --git a/internal/usage/handler_test.go b/internal/usage/handler_test.go index 7340cfd23..734f3da30 100644 --- a/internal/usage/handler_test.go +++ b/internal/usage/handler_test.go @@ -42,7 +42,7 @@ var errBoom = errors.New("boom") func TestHandle(t *testing.T) { protected := "This resource is protected!" 
type args struct { - reader client.Reader + client client.Client request admission.Request } type want struct { @@ -121,7 +121,7 @@ func TestHandle(t *testing.T) { "DeleteAllowedNoUsages": { reason: "We should allow a delete request if there is no usages for the given object.", args: args{ - reader: &test.MockClient{ + client: &test.MockClient{ MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, @@ -147,7 +147,7 @@ func TestHandle(t *testing.T) { "DeleteRejectedCannotList": { reason: "We should reject a delete request if we cannot list usages.", args: args{ - reader: &test.MockClient{ + client: &test.MockClient{ MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return errBoom }, @@ -173,7 +173,10 @@ func TestHandle(t *testing.T) { "DeleteBlockedWithUsageBy": { reason: "We should reject a delete request if there are usages for the given object with \"by\" defined.", args: args{ - reader: &test.MockClient{ + client: &test.MockClient{ + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { + return nil + }, MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*v1alpha1.UsageList) l.Items = []v1alpha1.Usage{ @@ -231,7 +234,10 @@ func TestHandle(t *testing.T) { "DeleteBlockedWithUsageReason": { reason: "We should reject a delete request if there are usages for the given object with \"reason\" defined.", args: args{ - reader: &test.MockClient{ + client: &test.MockClient{ + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { + return nil + }, MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*v1alpha1.UsageList) l.Items = []v1alpha1.Usage{ @@ -283,7 +289,10 @@ func TestHandle(t *testing.T) { "DeleteBlockedWithUsageNone": { reason: "We should reject a delete request if there are usages for the given object without \"reason\" or \"by\" defined.", args: args{ - reader: &test.MockClient{ + client: &test.MockClient{ + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { + return nil + }, MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*v1alpha1.UsageList) l.Items = []v1alpha1.Usage{ @@ -334,7 +343,7 @@ func TestHandle(t *testing.T) { } for name, tc := range cases { t.Run(name, func(t *testing.T) { - h := NewHandler(tc.args.reader, WithLogger(logging.NewNopLogger())) + h := NewHandler(tc.args.client, WithLogger(logging.NewNopLogger())) got := h.Handle(context.Background(), tc.args.request) if diff := cmp.Diff(tc.want.resp, got); diff != "" { t.Errorf("%s\nHandle(...): -want response, +got:\n%s", tc.reason, diff) From c25faee3a838e0c0c80e2b7120bbdfa65ee98caa Mon Sep 17 00:00:00 2001 From: nolancon Date: Fri, 1 Mar 2024 12:19:24 +0000 Subject: [PATCH 044/370] Add Akamai to list of adopters Signed-off-by: nolancon --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 92702cb27..f86453823 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -77,3 +77,4 @@ This list is sorted in the order that organizations were added to it. | [Nike](https://nike.com/) | [joel.cooklin@nike.com](mailto:joel.cooklin@nike.com) | Crossplane powers the internal developer platform managing thousands of resources from development to production. 
| | [Elastic](https://elastic.co) | [@hwoarang](https://github.com/hwoarang) | We use Crossplane to deploy resources across multiple Cloud Providers for the [Elastic Serverless](https://www.elastic.co/elasticsearch/serverless) products. | | [DB Systel](https://www.dbsystel.de) | [@gandhiano](htttps://github.com/gandhiano) | Backbone of the Developer Experience Platform of the [Deutsche Bahn](https://deutschebahn.com). Through Crossplane, application developers can easily provision and integrate a panoply of platform services, creating a coherent platform experience. Cloud infrastructure can also be self-serviced, allowing a 100% Gitops infrastructure-as-code approach. Both the K8s API and a developer portal UI ([Backstage](https://backstage.io)) can be used to interact with the Crossplane compositions.| +| [Akamai](https://www.akamai.com/) | [@nolancon](https://github.com/nolancon) | We use Crossplane to offer customers [provider-linode](https://github.com/linode/provider-linode), a control plane for Akamai Cloud Computing services based on Linode. We have also developed [provider-ceph](https://github.com/linode/provider-ceph) for object storage management across distributed Ceph clusters. | From 87ca5444261281d06da5cb9f8fda808b481be5e3 Mon Sep 17 00:00:00 2001 From: nolancon Date: Fri, 1 Mar 2024 17:32:23 +0000 Subject: [PATCH 045/370] Reword adopters entry with numbers Signed-off-by: nolancon --- ADOPTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index f86453823..2cc42c7d0 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -77,4 +77,4 @@ This list is sorted in the order that organizations were added to it. | [Nike](https://nike.com/) | [joel.cooklin@nike.com](mailto:joel.cooklin@nike.com) | Crossplane powers the internal developer platform managing thousands of resources from development to production. | | [Elastic](https://elastic.co) | [@hwoarang](https://github.com/hwoarang) | We use Crossplane to deploy resources across multiple Cloud Providers for the [Elastic Serverless](https://www.elastic.co/elasticsearch/serverless) products. | | [DB Systel](https://www.dbsystel.de) | [@gandhiano](htttps://github.com/gandhiano) | Backbone of the Developer Experience Platform of the [Deutsche Bahn](https://deutschebahn.com). Through Crossplane, application developers can easily provision and integrate a panoply of platform services, creating a coherent platform experience. Cloud infrastructure can also be self-serviced, allowing a 100% Gitops infrastructure-as-code approach. Both the K8s API and a developer portal UI ([Backstage](https://backstage.io)) can be used to interact with the Crossplane compositions.| -| [Akamai](https://www.akamai.com/) | [@nolancon](https://github.com/nolancon) | We use Crossplane to offer customers [provider-linode](https://github.com/linode/provider-linode), a control plane for Akamai Cloud Computing services based on Linode. We have also developed [provider-ceph](https://github.com/linode/provider-ceph) for object storage management across distributed Ceph clusters. | +| [Akamai](https://www.akamai.com/) | [@nolancon](https://github.com/nolancon) | We use Crossplane to offer customers [provider-linode](https://github.com/linode/provider-linode), a control plane for Akamai Cloud Computing services based on Linode. We have also used Crossplane to develop [provider-ceph](https://github.com/linode/provider-ceph). Provider Ceph is an object storage control plane for Ceph. 
It is capable orchestrating up to 200k Managed Resources which represent S3 buckets distributed across multiple Ceph clusters. | From 04b3642609ca8623de15625c2bae649d211677ee Mon Sep 17 00:00:00 2001 From: Dylan Moore Date: Mon, 4 Mar 2024 08:42:52 -0800 Subject: [PATCH 046/370] Added Variphy to ADOPTERS.MD Signed-off-by: Dylan Moore --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 2cc42c7d0..6738f6975 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -78,3 +78,4 @@ This list is sorted in the order that organizations were added to it. | [Elastic](https://elastic.co) | [@hwoarang](https://github.com/hwoarang) | We use Crossplane to deploy resources across multiple Cloud Providers for the [Elastic Serverless](https://www.elastic.co/elasticsearch/serverless) products. | | [DB Systel](https://www.dbsystel.de) | [@gandhiano](htttps://github.com/gandhiano) | Backbone of the Developer Experience Platform of the [Deutsche Bahn](https://deutschebahn.com). Through Crossplane, application developers can easily provision and integrate a panoply of platform services, creating a coherent platform experience. Cloud infrastructure can also be self-serviced, allowing a 100% Gitops infrastructure-as-code approach. Both the K8s API and a developer portal UI ([Backstage](https://backstage.io)) can be used to interact with the Crossplane compositions.| | [Akamai](https://www.akamai.com/) | [@nolancon](https://github.com/nolancon) | We use Crossplane to offer customers [provider-linode](https://github.com/linode/provider-linode), a control plane for Akamai Cloud Computing services based on Linode. We have also used Crossplane to develop [provider-ceph](https://github.com/linode/provider-ceph). Provider Ceph is an object storage control plane for Ceph. It is capable orchestrating up to 200k Managed Resources which represent S3 buckets distributed across multiple Ceph clusters. | +| [Variphy](https://www.variphy.com/) | [info@variphy.com](mailto:info@variphy.com) ([@dmvariphy](https://github.com/dmvariphy) [@nick-variphy](https://github.com/nick-variphy) [@dfalter-variphy](https://github.com/dfalter-variphy) [@zach-variphy](https://github.com/zach-variphy)) | We use Crossplane (via [Upbound Cloud](https://www.upbound.io/)) to manage our development and production infrastructure via GitOps. Crossplane also allows us to provide custom APIs for production Variphy applications to dynamically manage external resources, such as [Confluent Cloud](https://www.confluent.io/) Kafka topics. 
| From 5bc9fbb24a0f7484fc1ca27da579b8bd6a6f3581 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 19:13:58 +0000 Subject: [PATCH 047/370] fix(deps): update module golang.org/x/term to v0.18.0 --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index f62cc02a5..9b6344771 100644 --- a/go.mod +++ b/go.mod @@ -201,8 +201,8 @@ require ( golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.20.0 // indirect // indirect golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sys v0.16.0 // indirect - golang.org/x/term v0.16.0 + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.17.0 // indirect diff --git a/go.sum b/go.sum index fba4a06b2..6c814f025 100644 --- a/go.sum +++ b/go.sum @@ -583,8 +583,8 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -592,8 +592,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From d3e196464f7278ec996ad4fa9752e8e0d79a47dd Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Mon, 4 Mar 2024 20:32:16 +0000 Subject: [PATCH 048/370] Update Brian's Github account to lindblombr Signed-off-by: Jared Watts --- GOVERNANCE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 541edeb33..cffc7d458 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -67,7 +67,7 @@ Here are the members of the initial steering committee (listed in alphabetical o |   | Member | Organization | Email | Term Start | Term End | |-----------------------------------------------------------|------------------------------------------------|--------------|-----------------------------|------------|------------| | | 
[Bassam Tabbara](https://github.com/bassam) | Upbound | bassam@upbound.io | 2024-02-06 | 2026-02-06 | -| | [Brian Lindblom](https://github.com/brlindblom)| Apple | blindblom@apple.com | 2024-02-06 | 2025-02-07 | +| | [Brian Lindblom](https://github.com/lindblombr)| Apple | blindblom@apple.com | 2024-02-06 | 2025-02-07 | | | [Bob Haddleton](https://github.com/bobh66) | Nokia | bob.haddleton@nokia.com | 2024-02-06 | 2025-02-07 | | | [Jared Watts](https://github.com/jbw976) | Upbound | jared@upbound.io | 2024-02-06 | 2026-02-06 | | | [Nic Cope](https://github.com/negz) | Upbound | negz@upbound.io | 2024-02-06 | 2026-02-06 | From f816713039df09a0fe53b6085b4f47e73c20cccd Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 9 Feb 2024 15:48:40 +0000 Subject: [PATCH 049/370] fix: ignore invalid resources when composing Signed-off-by: Philippe Scorsolini --- .../apiextensions/composite/composed.go | 8 ++- .../composite/composition_functions.go | 66 ++++++++++++------- .../composite/composition_functions_test.go | 8 +-- .../apiextensions/composite/composition_pt.go | 19 +++++- .../composite/composition_pt_test.go | 3 + .../apiextensions/composite/reconciler.go | 56 +++++++++++----- .../composite/reconciler_test.go | 6 ++ 7 files changed, 120 insertions(+), 46 deletions(-) diff --git a/internal/controller/apiextensions/composite/composed.go b/internal/controller/apiextensions/composite/composed.go index 14b293a5b..8d89a5c75 100644 --- a/internal/controller/apiextensions/composite/composed.go +++ b/internal/controller/apiextensions/composite/composed.go @@ -35,8 +35,14 @@ type ComposedResource struct { ResourceName ResourceName // Ready indicates whether this composed resource is ready - i.e. whether - // all of its readiness checks passed. + // all of its readiness checks passed. Setting it to false will cause the + // XR to be marked as not ready. Ready bool + + // Synced indicates whether the composition process was able to sync the + // composed resource with its desired state. Setting it to false will cause + // the XR to be marked as not synced. + Synced bool } // ComposedResourceState represents a composed resource (either desired or diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index 4f1c6d2b6..379168036 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -454,6 +454,46 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur return CompositionResult{}, errors.Wrap(err, errApplyXRRefs) } + // Produce our array of resources to return to the Reconciler. The + // Reconciler uses this array to determine whether the XR is ready. + resources := make([]ComposedResource, 0, len(desired)) + + // We apply all of our desired resources before we observe them in the loop + // below. This ensures that issues observing and processing one composed + // resource won't block the application of another. + for name, cd := range desired { + // We don't need any crossplane-runtime resource.Applicator style apply + // options here because server-side apply takes care of everything. + // Specifically it will merge rather than replace owner references (e.g. + // for Usages), and will fail if we try to add a controller reference to + // a resource that already has a different one. 
+ // NOTE(phisco): We need to set a field owner unique for each XR here, + // this prevents multiple XRs composing the same resource to be + // continuously alternated as controllers. + if err := c.client.Patch(ctx, cd.Resource, client.Apply, client.ForceOwnership, client.FieldOwner(ComposedFieldOwnerName(xr))); err != nil { + if kerrors.IsInvalid(err) { + // We tried applying an invalid resource, we can't tell whether + // this means the resource will never be valid or it will if we + // run again the composition after some other resource is + // created or updated successfully. So, we emit a warning event + // and move on. + // We mark the resource as not synced, so that once we get to + // decide the XR's Synced condition, we can set it to false if + // any of the resources didn't sync successfully. + events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errFmtApplyCD, name))) + // NOTE(phisco): here we behave differently w.r.t. the native + // p&t composer, as we respect the readiness reported by + // functions, while there we defaulted to also set ready false + // in case of apply errors. + resources = append(resources, ComposedResource{ResourceName: name, Ready: cd.Ready, Synced: false}) + continue + } + return CompositionResult{}, errors.Wrapf(err, errFmtApplyCD, name) + } + + resources = append(resources, ComposedResource{ResourceName: name, Ready: cd.Ready, Synced: true}) + } + // Our goal here is to patch our XR's status using server-side apply. We // want the resulting, patched object loaded into uxr. We need to pass in // only our "fully specified intent" - i.e. only the fields that we actually @@ -475,32 +515,12 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur // NOTE(phisco): Here we are fine using a hardcoded field owner as there is // no risk of conflict between different XRs. if err := c.client.Status().Patch(ctx, xr, client.Apply, client.ForceOwnership, client.FieldOwner(FieldOwnerXR)); err != nil { + // Note(phisco): here we are fine with this error being terminal, as + // there is no other resource to apply that might eventually resolve + // this issue. return CompositionResult{}, errors.Wrap(err, errApplyXRStatus) } - // Produce our array of resources to return to the Reconciler. The - // Reconciler uses this array to determine whether the XR is ready. - resources := make([]ComposedResource, 0, len(desired)) - - // We apply all of our desired resources before we observe them in the loop - // below. This ensures that issues observing and processing one composed - // resource won't block the application of another. - for name, cd := range desired { - // We don't need any crossplane-runtime resource.Applicator style apply - // options here because server-side apply takes care of everything. - // Specifically it will merge rather than replace owner references (e.g. - // for Usages), and will fail if we try to add a controller reference to - // a resource that already has a different one. - // NOTE(phisco): We need to set a field owner unique for each XR here, - // this prevents multiple XRs composing the same resource to be - // continuously alternated as controllers. 
- if err := c.client.Patch(ctx, cd.Resource, client.Apply, client.ForceOwnership, client.FieldOwner(ComposedFieldOwnerName(xr))); err != nil { - return CompositionResult{}, errors.Wrapf(err, errFmtApplyCD, name) - } - - resources = append(resources, ComposedResource{ResourceName: name, Ready: cd.Ready}) - } - return CompositionResult{ConnectionDetails: d.GetComposite().GetConnectionDetails(), Composed: resources, Events: events}, nil } diff --git a/internal/controller/apiextensions/composite/composition_functions_test.go b/internal/controller/apiextensions/composite/composition_functions_test.go index 6b2595ab8..400b569b4 100644 --- a/internal/controller/apiextensions/composite/composition_functions_test.go +++ b/internal/controller/apiextensions/composite/composition_functions_test.go @@ -615,8 +615,8 @@ func TestFunctionCompose(t *testing.T) { want: want{ res: CompositionResult{ Composed: []ComposedResource{ - {ResourceName: "desired-resource-a"}, - {ResourceName: "observed-resource-a", Ready: true}, + {ResourceName: "desired-resource-a", Synced: true}, + {ResourceName: "observed-resource-a", Ready: true, Synced: true}, }, ConnectionDetails: managed.ConnectionDetails{ "from": []byte("function-pipeline"), @@ -816,8 +816,8 @@ func TestFunctionCompose(t *testing.T) { want: want{ res: CompositionResult{ Composed: []ComposedResource{ - {ResourceName: "desired-resource-a"}, - {ResourceName: "observed-resource-a", Ready: true}, + {ResourceName: "desired-resource-a", Synced: true}, + {ResourceName: "observed-resource-a", Ready: true, Synced: true}, }, ConnectionDetails: managed.ConnectionDetails{ "from": []byte("function-pipeline"), diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index 6b3949ea6..023afa2fa 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -273,6 +273,21 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re o := []resource.ApplyOption{resource.MustBeControllableBy(xr.GetUID()), usage.RespectOwnerRefs()} o = append(o, mergeOptions(filterPatches(t.Patches, patchTypesFromXR()...))...) if err := c.client.Apply(ctx, cd, o...); err != nil { + if kerrors.IsInvalid(err) { + // We tried applying an invalid resource, we can't tell whether + // this means the resource will never be valid or it will if we + // run again the composition after some other resource is + // created or updated successfully. So, we emit a warning event + // and move on. + events = append(events, event.Warning(reasonCompose, errors.Wrap(err, errApplyComposed))) + // We unset the cd here so that we don't try to observe it + // later. This will also mean we report it as not ready and not + // synced. Resulting in the XR being reported as not ready nor + // synced too. + cds[i] = nil + continue + } + // TODO(negz): Include the template name (if any) in this error. // Including the rendered resource's kind may help too (e.g. if the // template is anonymous). @@ -298,7 +313,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re // to observe it. We still want to return it to the Reconciler so that // it knows that this desired composed resource is not ready. 
if cd == nil { - resources[i] = ComposedResource{ResourceName: name, Ready: false} + resources[i] = ComposedResource{ResourceName: name, Synced: false, Ready: false} continue } @@ -328,7 +343,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re return CompositionResult{}, errors.Wrapf(err, errFmtCheckReadiness, name) } - resources[i] = ComposedResource{ResourceName: name, Ready: ready} + resources[i] = ComposedResource{ResourceName: name, Ready: ready, Synced: true} } // Call Apply so that we do not just replace fields on existing XR but diff --git a/internal/controller/apiextensions/composite/composition_pt_test.go b/internal/controller/apiextensions/composite/composition_pt_test.go index fe0d58476..52558ab59 100644 --- a/internal/controller/apiextensions/composite/composition_pt_test.go +++ b/internal/controller/apiextensions/composite/composition_pt_test.go @@ -392,6 +392,7 @@ func TestPTCompose(t *testing.T) { Composed: []ComposedResource{{ ResourceName: "cool-resource", Ready: true, + Synced: true, }}, ConnectionDetails: details, }, @@ -457,10 +458,12 @@ func TestPTCompose(t *testing.T) { { ResourceName: "cool-resource", Ready: true, + Synced: true, }, { ResourceName: "uncool-resource", Ready: false, + Synced: false, }, }, ConnectionDetails: details, diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index bdeb0b1bf..8c979d3ac 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -74,6 +74,7 @@ const ( errCompose = "cannot compose resources" errInvalidResources = "some resources were invalid, check events" errRenderCD = "cannot render composed resource" + errSyncResources = "cannot sync composed resources" reconcilePausedMsg = "Reconciliation (including deletion) is paused via the pause annotation" ) @@ -666,6 +667,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } var unready []ComposedResource + var unsynced []ComposedResource for i, cd := range res.Composed { // Specifying a name for P&T templates is optional but encouraged. // If there was no name, fall back to using the index. @@ -674,6 +676,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco id = strconv.Itoa(i) } + if !cd.Synced { + log.Debug("Composed resource is not yet synced", "id", id) + unsynced = append(unsynced, cd) + r.record.Event(xr, event.Normal(reasonCompose, fmt.Sprintf("Composed resource %q is not yet synced", id))) + continue + } + if !cd.Ready { log.Debug("Composed resource is not yet ready", "id", id) unready = append(unready, cd) @@ -682,21 +691,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } } - xr.SetConditions(xpv1.ReconcileSuccess()) - - // TODO(muvaf): If a resource becomes Unavailable at some point, should we - // still report it as Creating? - if len(unready) > 0 { - // We want to requeue to wait for our composed resources to - // become ready, since we can't watch them. - names := make([]string, len(unready)) - for i, cd := range unready { - names[i] = string(cd.ResourceName) - } - // Sort for stable condition messages. With functions, we don't have a - // stable order otherwise. 
- xr.SetConditions(xpv1.Creating().WithMessage(fmt.Sprintf("Unready resources: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, names)))) - + if updateXRConditions(xr, unsynced, unready) { // This requeue is subject to rate limiting. Requeues will exponentially // backoff from 1 to 30 seconds. See the 'definition' (XRD) reconciler // that sets up the ratelimiter. @@ -706,10 +701,39 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // We requeue after our poll interval because we can't watch composed // resources - we can't know what type of resources we might compose // when this controller is started. - xr.SetConditions(xpv1.Available()) return reconcile.Result{RequeueAfter: r.pollInterval(ctx, xr)}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) } +// updateXRConditions updates the conditions of the supplied composite resource +// based on the supplied composed resources. It returns true if the XR should be +// requeued immediately. +func updateXRConditions(xr *composite.Unstructured, unsynced, unready []ComposedResource) (requeueImmediately bool) { + readyCond := xpv1.Available() + syncedCond := xpv1.ReconcileSuccess() + if len(unsynced) > 0 { + // We want to requeue to wait for our composed resources to + // become ready, since we can't watch them. + syncedCond = xpv1.ReconcileError(errors.New(errSyncResources)).WithMessage(fmt.Sprintf("Unsynced resources: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, getComposerResourcesNames(unsynced)))) + requeueImmediately = true + } + if len(unready) > 0 { + // We want to requeue to wait for our composed resources to + // become ready, since we can't watch them. + readyCond = xpv1.Creating().WithMessage(fmt.Sprintf("Unready resources: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, getComposerResourcesNames(unready)))) + requeueImmediately = true + } + xr.SetConditions(syncedCond, readyCond) + return requeueImmediately +} + +func getComposerResourcesNames(cds []ComposedResource) []string { + names := make([]string, len(cds)) + for i, cd := range cds { + names[i] = string(cd.ResourceName) + } + return names +} + // EnqueueForCompositionRevisionFunc returns a function that enqueues (the // related) XRs when a new CompositionRevision is created. 
This speeds up // reconciliation of XRs on changes to the Composition by not having to wait for diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index dda3711d5..5c42c3715 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ b/internal/controller/apiextensions/composite/reconciler_test.go @@ -517,21 +517,27 @@ func TestReconcile(t *testing.T) { Composed: []ComposedResource{{ ResourceName: "elephant", Ready: false, + Synced: true, }, { ResourceName: "cow", Ready: false, + Synced: true, }, { ResourceName: "pig", Ready: true, + Synced: true, }, { ResourceName: "cat", Ready: false, + Synced: true, }, { ResourceName: "dog", Ready: true, + Synced: true, }, { ResourceName: "snake", Ready: false, + Synced: true, }}, }, nil })), From 320a2bec274ad96e3ce4e54b572902eca4a750e7 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Thu, 29 Feb 2024 10:17:13 +0000 Subject: [PATCH 050/370] chore: reword event and debug log Signed-off-by: Philippe Scorsolini --- internal/controller/apiextensions/composite/reconciler.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index 8c979d3ac..a62f6a27b 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -677,9 +677,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } if !cd.Synced { - log.Debug("Composed resource is not yet synced", "id", id) + log.Debug("Composed resource is not yet valid", "id", id) unsynced = append(unsynced, cd) - r.record.Event(xr, event.Normal(reasonCompose, fmt.Sprintf("Composed resource %q is not yet synced", id))) + r.record.Event(xr, event.Normal(reasonCompose, fmt.Sprintf("Composed resource %q is not yet valid", id))) continue } From 4b641f8fb83554273c268d3dd22af1e3a0d3c1ed Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Thu, 29 Feb 2024 16:23:57 +0000 Subject: [PATCH 051/370] fix: consider both ready and sync Signed-off-by: Philippe Scorsolini --- internal/controller/apiextensions/composite/reconciler.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index a62f6a27b..caa7a05c1 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -680,14 +680,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco log.Debug("Composed resource is not yet valid", "id", id) unsynced = append(unsynced, cd) r.record.Event(xr, event.Normal(reasonCompose, fmt.Sprintf("Composed resource %q is not yet valid", id))) - continue } if !cd.Ready { log.Debug("Composed resource is not yet ready", "id", id) unready = append(unready, cd) r.record.Event(xr, event.Normal(reasonCompose, fmt.Sprintf("Composed resource %q is not yet ready", id))) - continue } } From 4cfc571d8bf6163954997999b2aadacdca1c583e Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Thu, 29 Feb 2024 20:26:48 +0000 Subject: [PATCH 052/370] tests(e2e): add TestCompositionInvalidComposed Signed-off-by: Philippe Scorsolini --- test/e2e/apiextensions_test.go | 44 +++++++++++ .../invalid-composed/setup/composition.yaml | 75 +++++++++++++++++++ .../invalid-composed/setup/definition.yaml 
| 51 +++++++++++++ .../invalid-composed/setup/provider.yaml | 7 ++ .../composition/invalid-composed/xr.yaml | 11 +++ 5 files changed, 188 insertions(+) create mode 100644 test/e2e/manifests/apiextensions/composition/invalid-composed/setup/composition.yaml create mode 100644 test/e2e/manifests/apiextensions/composition/invalid-composed/setup/definition.yaml create mode 100644 test/e2e/manifests/apiextensions/composition/invalid-composed/setup/provider.yaml create mode 100644 test/e2e/manifests/apiextensions/composition/invalid-composed/xr.yaml diff --git a/test/e2e/apiextensions_test.go b/test/e2e/apiextensions_test.go index 038edd2a9..667700ca0 100644 --- a/test/e2e/apiextensions_test.go +++ b/test/e2e/apiextensions_test.go @@ -103,6 +103,50 @@ func TestCompositionMinimal(t *testing.T) { ) } +// TestCompositionInvalidComposed tests Crossplane's Composition functionality, +// checking that although a composed resource is invalid, i.e. it didn't apply +// successfully. +func TestCompositionInvalidComposed(t *testing.T) { + manifests := "test/e2e/manifests/apiextensions/composition/invalid-composed" + + xrList := composed.NewList(composed.FromReferenceToList(corev1.ObjectReference{ + APIVersion: "example.org/v1alpha1", + Kind: "XParent", + }), composed.FromReferenceToList(corev1.ObjectReference{ + APIVersion: "example.org/v1alpha1", + Kind: "XChild", + })) + + environment.Test(t, + features.New(t.Name()). + WithLabel(LabelArea, LabelAreaAPIExtensions). + WithLabel(LabelSize, LabelSizeSmall). + WithLabel(config.LabelTestSuite, config.TestSuiteDefault). + WithSetup("PrerequisitesAreCreated", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "setup/*.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "setup/*.yaml"), + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "setup/definition.yaml", apiextensionsv1.WatchingComposite()), + funcs.ResourcesHaveConditionWithin(2*time.Minute, manifests, "setup/provider.yaml", pkgv1.Healthy(), pkgv1.Active()), + )). + Assess("CreateXR", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "xr.yaml"), + funcs.InBackground(funcs.LogResources(xrList)), + funcs.InBackground(funcs.LogResources(nopList)), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "xr.yaml"), + )). + Assess("XRStillAnnotated", funcs.AllOf( + // Check the XR it has metadata.annotations set + funcs.ResourcesHaveFieldValueWithin(1*time.Minute, manifests, "xr.yaml", "metadata.annotations[exampleVal]", "foo"), + )). + WithTeardown("DeleteXR", funcs.AllOf( + funcs.DeleteResources(manifests, "xr.yaml"), + funcs.ResourcesDeletedWithin(2*time.Minute, manifests, "xr.yaml"), + )). + WithTeardown("DeletePrerequisites", funcs.ResourcesDeletedAfterListedAreGone(3*time.Minute, manifests, "setup/*.yaml", nopList)). 
+ Feature(), + ) +} + // TestCompositionPatchAndTransform tests Crossplane's Composition functionality, // checking that a claim using patch-and-transform Composition will become // available when its composed resources do, and have a field derived from diff --git a/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/composition.yaml b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/composition.yaml new file mode 100644 index 000000000..6c9c38716 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/composition.yaml @@ -0,0 +1,75 @@ +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: parent +spec: + compositeTypeRef: + apiVersion: example.org/v1alpha1 + kind: XParent + resources: + - name: child + base: + apiVersion: example.org/v1alpha1 + kind: XChild + spec: {} + patches: + - type: FromCompositeFieldPath + # this is going to be 1 + fromFieldPath: spec.someField + # this will fail because it's supposed to be > 1 + toFieldPath: spec.someField + - name: nop-resource-1 + base: + apiVersion: nop.crossplane.io/v1alpha1 + kind: NopResource + metadata: + annotations: + exampleVal: "foo" + spec: + forProvider: + conditionAfter: + - conditionType: Ready + conditionStatus: "False" + time: 0s + - conditionType: Ready + conditionStatus: "True" + time: 1s + patches: + - type: FromCompositeFieldPath + fromFieldPath: metadata.name + # we should still see this in the child + toFieldPath: metadata.annotations[something] + - type: ToCompositeFieldPath + fromFieldPath: metadata.annotations[exampleVal] + # we should still see this in the composite + toFieldPath: metadata.annotations[exampleVal] +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: child +spec: + compositeTypeRef: + apiVersion: example.org/v1alpha1 + kind: XChild + resources: + # we don't really care about what happens here, it's not going to work + # because the composite resource will be invalid + - name: nop-resource-1 + base: + apiVersion: nop.crossplane.io/v1alpha1 + kind: NopResource + spec: + forProvider: + conditionAfter: + - conditionType: Ready + conditionStatus: "False" + time: 0s + - conditionType: Ready + conditionStatus: "True" + time: 1s + patches: + - type: FromCompositeFieldPath + fromFieldPath: metadata.name + toFieldPath: metadata.annotations[something] diff --git a/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/definition.yaml b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/definition.yaml new file mode 100644 index 000000000..192c76708 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/definition.yaml @@ -0,0 +1,51 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xparents.example.org +spec: + defaultCompositionRef: + name: parent + group: example.org + names: + kind: XParent + plural: xparents + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + someField: + # no limits on its value + type: integer +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xchildren.example.org +spec: + defaultCompositionRef: + name: child + group: example.org + names: + kind: XChild + plural: xchildren + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + type: object + 
properties: + spec: + type: object + properties: + someField: + minimum: 2 + type: integer diff --git a/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/provider.yaml b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/provider.yaml new file mode 100644 index 000000000..b82f2c560 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/provider.yaml @@ -0,0 +1,7 @@ +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-nop +spec: + package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1 + ignoreCrossplaneConstraints: true diff --git a/test/e2e/manifests/apiextensions/composition/invalid-composed/xr.yaml b/test/e2e/manifests/apiextensions/composition/invalid-composed/xr.yaml new file mode 100644 index 000000000..a233b2916 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/invalid-composed/xr.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: example.org/v1alpha1 +kind: XParent +metadata: + name: test +# Expected: +# annotations: +# exampleVal: "foo" +spec: + # this should be > 1 in the XChild composed resource, so it will fail applying it + someField: 1 From 2aaf7cff6ee3cd19701c60ab5935bf85e712821d Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Wed, 6 Mar 2024 09:14:36 +0000 Subject: [PATCH 053/370] chore: reword sync error condition message Signed-off-by: Philippe Scorsolini --- internal/controller/apiextensions/composite/reconciler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index caa7a05c1..2d5b69304 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -711,7 +711,7 @@ func updateXRConditions(xr *composite.Unstructured, unsynced, unready []Composed if len(unsynced) > 0 { // We want to requeue to wait for our composed resources to // become ready, since we can't watch them. 
- syncedCond = xpv1.ReconcileError(errors.New(errSyncResources)).WithMessage(fmt.Sprintf("Unsynced resources: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, getComposerResourcesNames(unsynced)))) + syncedCond = xpv1.ReconcileError(errors.New(errSyncResources)).WithMessage(fmt.Sprintf("Invalid resources: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, getComposerResourcesNames(unsynced)))) requeueImmediately = true } if len(unready) > 0 { From ef512a0950ad0f4adc8ab4b0a5965020ae4385c7 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Sat, 9 Mar 2024 15:36:46 +0300 Subject: [PATCH 054/370] Replay deletion with the same policy Signed-off-by: Hasan Turken --- .../apiextensions/usage/reconciler.go | 26 +++++++++-------- .../apiextensions/usage/reconciler_test.go | 2 +- internal/usage/handler.go | 28 +++++++++++++------ .../usage/standalone/with-by/usage.yaml | 1 + .../usage/standalone/with-by/used.yaml | 3 -- .../usage/standalone/with-by/using.yaml | 3 -- .../usage/standalone/with-reason/used.yaml | 3 -- test/e2e/usage_test.go | 6 ++-- 8 files changed, 38 insertions(+), 34 deletions(-) diff --git a/internal/controller/apiextensions/usage/reconciler.go b/internal/controller/apiextensions/usage/reconciler.go index b89f90cdc..f3ea237b5 100644 --- a/internal/controller/apiextensions/usage/reconciler.go +++ b/internal/controller/apiextensions/usage/reconciler.go @@ -317,17 +317,21 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } } - if u.Spec.ReplayDeletion != nil && *u.Spec.ReplayDeletion && used.GetAnnotations() != nil && used.GetAnnotations()[usage.AnnotationKeyFirstDeletionAttempt] != "" { - // We have already recorded a deletion attempt and want to replay deletion, let's delete the used resource. - log.Debug("Replaying deletion of the used resource", "apiVersion", used.GetAPIVersion(), "kind", used.GetKind(), "name", used.GetName()) - go func() { - // We do the deletion async and after some delay to make sure the usage is deleted before the - // deletion attempt. We remove the finalizer on this Usage right below, so, we know it will disappear - // very soon. - time.Sleep(2 * time.Second) - // We cannot use the context from the reconcile function since it will be cancelled after the reconciliation. - _ = r.client.Delete(context.Background(), used) - }() + if u.Spec.ReplayDeletion != nil && *u.Spec.ReplayDeletion && used.GetAnnotations() != nil { + if policy, ok := used.GetAnnotations()[usage.AnnotationKeyDeletionAttempt]; ok { + // We have already recorded a deletion attempt and want to replay deletion, let's delete the used resource. + go func() { + // We do the deletion async and after some delay to make sure the usage is deleted before the + // deletion attempt. We remove the finalizer on this Usage right below, so, we know it will disappear + // very soon. + time.Sleep(2 * time.Second) + log.Info("Replaying deletion of the used resource", "apiVersion", used.GetAPIVersion(), "kind", used.GetKind(), "name", used.GetName(), "policy", policy) + // We cannot use the context from the reconcile function since it will be cancelled after the reconciliation. 
+ if err = r.client.Delete(context.Background(), used, client.PropagationPolicy(policy)); err != nil { + log.Info("Error when replaying deletion of the used resource", "apiVersion", used.GetAPIVersion(), "kind", used.GetKind(), "name", used.GetName(), "err", err) + } + }() + } } // Remove the finalizer from the usage diff --git a/internal/controller/apiextensions/usage/reconciler_test.go b/internal/controller/apiextensions/usage/reconciler_test.go index 0a1aa78ef..3225b79b3 100644 --- a/internal/controller/apiextensions/usage/reconciler_test.go +++ b/internal/controller/apiextensions/usage/reconciler_test.go @@ -740,7 +740,7 @@ func TestReconcile(t *testing.T) { return nil } if o, ok := obj.(*composed.Unstructured); ok { - o.SetAnnotations(map[string]string{usage.AnnotationKeyFirstDeletionAttempt: time.Now().String()}) + o.SetAnnotations(map[string]string{usage.AnnotationKeyDeletionAttempt: string(metav1.DeletePropagationBackground)}) o.SetLabels(map[string]string{inUseLabelKey: "true"}) return nil } diff --git a/internal/usage/handler.go b/internal/usage/handler.go index 8ef0fb4c9..3956a536e 100644 --- a/internal/usage/handler.go +++ b/internal/usage/handler.go @@ -21,7 +21,6 @@ import ( "context" "fmt" "net/http" - "time" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,6 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "sigs.k8s.io/yaml" "github.com/crossplane/crossplane-runtime/pkg/controller" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -46,9 +46,10 @@ const ( // indexing and retrieving needed CRDs InUseIndexKey = "inuse.apiversion.kind.name" - // AnnotationKeyFirstDeletionAttempt is the annotation key used to record the timestamp for first deletion attempt - // which was blocked due to usage. - AnnotationKeyFirstDeletionAttempt = "usage.crossplane.io/first-deletion-attempt" + // AnnotationKeyDeletionAttempt is the annotation key used to record whether + // a deletion attempt was made and blocked by the Usage. The value stored is + // the propagation policy used with the deletion attempt. + AnnotationKeyDeletionAttempt = "usage.crossplane.io/deletion-attempt-with-policy" // Error strings. 
errFmtUnexpectedOp = "unexpected operation %q, expected \"DELETE\"" @@ -132,14 +133,18 @@ func (h *Handler) Handle(ctx context.Context, request admission.Request) admissi if err := u.UnmarshalJSON(request.OldObject.Raw); err != nil { return admission.Errored(http.StatusBadRequest, err) } - return h.validateNoUsages(ctx, u) + opts := &metav1.DeleteOptions{} + if err := yaml.Unmarshal(request.Options.Raw, opts); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + return h.validateNoUsages(ctx, u, opts) default: return admission.Errored(http.StatusBadRequest, errors.Errorf(errFmtUnexpectedOp, request.Operation)) } } -func (h *Handler) validateNoUsages(ctx context.Context, u *unstructured.Unstructured) admission.Response { - h.log.Debug("Validating no usages", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName()) +func (h *Handler) validateNoUsages(ctx context.Context, u *unstructured.Unstructured, opts *metav1.DeleteOptions) admission.Response { + h.log.Debug("Validating no usages", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName(), "policy", opts.PropagationPolicy) usageList := &v1alpha1.UsageList{} if err := h.client.List(ctx, usageList, client.MatchingFields{InUseIndexKey: IndexValueForObject(u)}); err != nil { h.log.Debug("Error when getting Usages", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName(), "err", err) @@ -149,11 +154,16 @@ func (h *Handler) validateNoUsages(ctx context.Context, u *unstructured.Unstruct msg := inUseMessage(usageList) h.log.Debug("Usage found, deletion not allowed", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName(), "msg", msg) + // Use the default propagation policy if not provided + policy := metav1.DeletePropagationBackground + if opts.PropagationPolicy != nil { + policy = *opts.PropagationPolicy + } // If the resource is being deleted, we want to record the first deletion attempt // so that we can track whether a deletion was attempted at least once. 
- if u.GetAnnotations() == nil || u.GetAnnotations()[AnnotationKeyFirstDeletionAttempt] == "" { + if u.GetAnnotations() == nil || u.GetAnnotations()[AnnotationKeyDeletionAttempt] != string(policy) { orig := u.DeepCopy() - xpmeta.AddAnnotations(u, map[string]string{AnnotationKeyFirstDeletionAttempt: metav1.Now().Format(time.RFC3339)}) + xpmeta.AddAnnotations(u, map[string]string{AnnotationKeyDeletionAttempt: string(policy)}) // Patch the resource to add the deletion attempt annotation if err := h.client.Patch(ctx, u, client.MergeFrom(orig)); err != nil { h.log.Debug("Error when patching the resource to add the deletion attempt annotation", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName(), "err", err) diff --git a/test/e2e/manifests/apiextensions/usage/standalone/with-by/usage.yaml b/test/e2e/manifests/apiextensions/usage/standalone/with-by/usage.yaml index 6c54d3b9b..e1cd38bcc 100644 --- a/test/e2e/manifests/apiextensions/usage/standalone/with-by/usage.yaml +++ b/test/e2e/manifests/apiextensions/usage/standalone/with-by/usage.yaml @@ -3,6 +3,7 @@ kind: Usage metadata: name: using-uses-used spec: + replayDeletion: true of: apiVersion: nop.crossplane.io/v1alpha1 kind: NopResource diff --git a/test/e2e/manifests/apiextensions/usage/standalone/with-by/used.yaml b/test/e2e/manifests/apiextensions/usage/standalone/with-by/used.yaml index da0ec9f17..3fa17b096 100644 --- a/test/e2e/manifests/apiextensions/usage/standalone/with-by/used.yaml +++ b/test/e2e/manifests/apiextensions/usage/standalone/with-by/used.yaml @@ -7,9 +7,6 @@ metadata: spec: forProvider: conditionAfter: - - conditionType: "Synced" - conditionStatus: "True" - time: "5s" - conditionType: "Ready" conditionStatus: "True" time: "10s" \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/usage/standalone/with-by/using.yaml b/test/e2e/manifests/apiextensions/usage/standalone/with-by/using.yaml index b182b1d5a..62a6edcac 100644 --- a/test/e2e/manifests/apiextensions/usage/standalone/with-by/using.yaml +++ b/test/e2e/manifests/apiextensions/usage/standalone/with-by/using.yaml @@ -5,9 +5,6 @@ metadata: spec: forProvider: conditionAfter: - - conditionType: "Synced" - conditionStatus: "True" - time: "5s" - conditionType: "Ready" conditionStatus: "True" time: "10s" \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/usage/standalone/with-reason/used.yaml b/test/e2e/manifests/apiextensions/usage/standalone/with-reason/used.yaml index ace8b8701..af0060395 100644 --- a/test/e2e/manifests/apiextensions/usage/standalone/with-reason/used.yaml +++ b/test/e2e/manifests/apiextensions/usage/standalone/with-reason/used.yaml @@ -7,9 +7,6 @@ metadata: spec: forProvider: conditionAfter: - - conditionType: "Synced" - conditionStatus: "True" - time: "5s" - conditionType: "Ready" conditionStatus: "True" time: "10s" \ No newline at end of file diff --git a/test/e2e/usage_test.go b/test/e2e/usage_test.go index 9fcb0608e..065c504b7 100644 --- a/test/e2e/usage_test.go +++ b/test/e2e/usage_test.go @@ -59,10 +59,8 @@ func TestUsageStandalone(t *testing.T) { funcs.DeleteResources(manifests, "with-by/using.yaml"), funcs.ResourcesDeletedWithin(30*time.Second, manifests, "with-by/using.yaml"), funcs.ResourcesDeletedWithin(30*time.Second, manifests, "with-by/usage.yaml"), - - // Deletion of used resource should be allowed after usage is cleared. 
- funcs.DeleteResources(manifests, "with-by/used.yaml"), - funcs.ResourcesDeletedWithin(30*time.Second, manifests, "with-by/used.yaml"), + // We have "replayDeletion: true" on the usage, deletion of used resource should be replayed after usage is cleared. + funcs.ResourcesDeletedWithin(1*time.Minute, manifests, "with-by/used.yaml"), ), }, { From b593288a667a80790d476d6fa03a6c916d20a715 Mon Sep 17 00:00:00 2001 From: avoidalone Date: Mon, 11 Mar 2024 15:05:43 +0800 Subject: [PATCH 055/370] chore: remove repetitive words Signed-off-by: avoidalone --- design/defunct/one-pager-consuming-k8s-clusters.md | 2 +- design/defunct/one-pager-default-resource-class.md | 2 +- design/defunct/one-pager-strongly-typed-class.md | 2 +- design/design-doc-packages-v2.md | 6 +++--- design/one-pager-resource-connectivity-mvp.md | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/design/defunct/one-pager-consuming-k8s-clusters.md b/design/defunct/one-pager-consuming-k8s-clusters.md index 6c32ccb02..359e5d0c4 100644 --- a/design/defunct/one-pager-consuming-k8s-clusters.md +++ b/design/defunct/one-pager-consuming-k8s-clusters.md @@ -304,7 +304,7 @@ Because the scheduling of `KubernetesApplication` resources is now isolated to target the `KubernetesTarget` resource, more intelligent scheduling can be enabled without touching other parts of the Crossplane ecosystem. Previously, a `KubernetesCluster` claim was used for claiming, consuming, and dynamically -provisioning Kubernetes cluster resources so changes to the the API type related +provisioning Kubernetes cluster resources so changes to the API type related to scheduling (i.e. consuming) could unintentionally affect those other capabilities as well. Potential future scheduling improvements could involve price, latency, and geographic optimization by surfacing additional fields or diff --git a/design/defunct/one-pager-default-resource-class.md b/design/defunct/one-pager-default-resource-class.md index 941197a9c..d804cb281 100644 --- a/design/defunct/one-pager-default-resource-class.md +++ b/design/defunct/one-pager-default-resource-class.md @@ -36,7 +36,7 @@ that can be consumed in a portable manner across cloud providers. ### Original State -Originally, resource claims had to explicitly declare the the underlying +Originally, resource claims had to explicitly declare the underlying resource class that they want to inherit the configuration from on deployment. For example, the following resource class could be declared for a Postgres RDS database instance on AWS: diff --git a/design/defunct/one-pager-strongly-typed-class.md b/design/defunct/one-pager-strongly-typed-class.md index 334590907..d2971a000 100644 --- a/design/defunct/one-pager-strongly-typed-class.md +++ b/design/defunct/one-pager-strongly-typed-class.md @@ -86,7 +86,7 @@ spec: engineVersion: "9.6" ``` -This model is powerful because it allows an application developer to create a resource claim without having to know the implementation details or even the underlying provider. However, the fact that every resource class is of the same `kind` presents a key issue: The required parameters for a resource class may vary widely, and they are currently only provided as an arbitrary map that is eventually read by the controller for the the specified `provisioner`. Therefore, an administrator who is creating resource classes does not know what fields are required and will not be notified of missing or extraneous fields until the provisioning of a resource that references the class. 
+This model is powerful because it allows an application developer to create a resource claim without having to know the implementation details or even the underlying provider. However, the fact that every resource class is of the same `kind` presents a key issue: The required parameters for a resource class may vary widely, and they are currently only provided as an arbitrary map that is eventually read by the controller for the specified `provisioner`. Therefore, an administrator who is creating resource classes does not know what fields are required and will not be notified of missing or extraneous fields until the provisioning of a resource that references the class. The `parameters` supplied by the resource class are used to populate the `spec` of the managed resource (i.e. the Kubernetes representation of the external resource) when it is created. For instance, the creation of `mysql-claim`, which references the `standard-mysql` class, is watched by the claim controller for AWS RDS instances. It brings together the information provided in the claim and class to create the `RDSInstance` managed resource. Specifically, it calls the `ConfigureMyRDSInstance()` function. As part of the configuration, the function creates the `spec` of the `RDSInstance` managed resource from the `parameters` of the `ResourceClass`: diff --git a/design/design-doc-packages-v2.md b/design/design-doc-packages-v2.md index 5a109bf9b..65fd360e3 100644 --- a/design/design-doc-packages-v2.md +++ b/design/design-doc-packages-v2.md @@ -161,7 +161,7 @@ workflow the package manager uses for installing a `Package`. We will use a [stdout](https://github.com/crossplane/crossplane/blob/a0d139f7cf269599ba916ed15af3fd68ffeabbdf/cmd/crossplane/package/unpack/unpack.go#L53). 4. The _`ClusterPackageInstall` controller_ waits for the `Job` to complete successfully before reading the logs from the `Pod`. When the `Job` is - complete, it reads the the logs and creates all of the objects that were + complete, it reads the logs and creates all of the objects that were printed, making a [few modifications as well as annotating and labelling appropriately](https://github.com/crossplane/crossplane/blob/6fc50822fbf11a7d31f8a9dabde5c8948c3b36ac/pkg/controller/packages/install/installjob.go#L259). 5. The _`Package` controller_ observes the `Package` creation and assumes the @@ -714,7 +714,7 @@ will operate with the following behavior. `spec.desiredState: Active`. 3. Set `status.currentRevision` to full image name used for `PackageRevision` (this can be obtained from the `Pod` in the install `Job`) -4. Report status of `PackageRevision` in the the status of the `Package`. +4. Report status of `PackageRevision` in the status of the `Package`. **`Package` Created with `spec.revisionActivationPolicy: Manual`** @@ -723,7 +723,7 @@ will operate with the following behavior. `spec.desiredState: Inactive`. 3. Set `status.currentRevision` to full image name used for `PackageRevision` (this can be obtained from the `Pod` in the install `Job`) -4. Report status of `PackageRevision` in the the status of the `Package`. +4. Report status of `PackageRevision` in the status of the `Package`. User is responsible for manually setting the `PackageRevision` to `Active`. 
diff --git a/design/one-pager-resource-connectivity-mvp.md b/design/one-pager-resource-connectivity-mvp.md index 8544b922b..27ec3fd28 100644 --- a/design/one-pager-resource-connectivity-mvp.md +++ b/design/one-pager-resource-connectivity-mvp.md @@ -688,7 +688,7 @@ resources need to be created beforehand: and associating it with a set of subnets. In addition, `RDSInstance`s also need the following resources, so that they are -accessible by the the worker nodes: +accessible by the worker nodes: * `DBSubnetGroup`: represents a group of `Subnet`s from different availability zones, @@ -964,7 +964,7 @@ ensure connectivity. We will need to wait until *after* the Wordspress stack is installed to create the VNet Rule on the MySQL DB due to the fact that the database will not exist -until the the stack references our `SQLServerClass` with a claim. +until the stack references our `SQLServerClass` with a claim. #### A Model for Deploying Wordpress From d347ff385c1701ba2dbc545b04c1bb56e9fe4767 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Wed, 13 Mar 2024 16:11:09 +0300 Subject: [PATCH 056/370] Allow group changes in composed templates Fixes #5473 Signed-off-by: Hasan Turken --- .../composite/composition_render.go | 14 ++++++----- .../composite/composition_render_test.go | 23 +++++++++---------- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/internal/controller/apiextensions/composite/composition_render.go b/internal/controller/apiextensions/composite/composition_render.go index 2cb2cb2ff..ee7e2dd02 100644 --- a/internal/controller/apiextensions/composite/composition_render.go +++ b/internal/controller/apiextensions/composite/composition_render.go @@ -32,8 +32,8 @@ const ( errMarshalProtoStruct = "cannot marshal protobuf Struct to JSON" errSetControllerRef = "cannot set controller reference" - errFmtKindOrGroupChanged = "cannot change the kind or group of a composed resource from %s to %s (possible composed resource template mismatch)" - errFmtNamePrefixLabel = "cannot find top-level composite resource name label %q in composite resource metadata" + errFmtKindChanged = "cannot change the kind of a composed resource from %s to %s (possible composed resource template mismatch)" + errFmtNamePrefixLabel = "cannot find top-level composite resource name label %q in composite resource metadata" // TODO(negz): Include more detail such as field paths if they exist. // Perhaps require each patch type to have a String() method to help @@ -61,16 +61,18 @@ func RenderFromJSON(o resource.Object, data []byte) error { o.SetName(name) o.SetNamespace(namespace) - // This resource already had a GK (probably because it already exists), but + // This resource already had a Kind (probably because it already exists), but // when we rendered its template it changed. This shouldn't happen. Either - // someone changed the kind or group in the template, or we're trying to use the + // someone changed the kind in the template, or we're trying to use the // wrong template (e.g. because the order of an array of anonymous templates // changed). // Please note, we don't check for version changes, as versions can change. For example, // if a composed resource was created with a template that has a version of "v1alpha1", // and then the template is updated to "v1beta1", the composed resource will still be valid. 
- if !gvk.Empty() && o.GetObjectKind().GroupVersionKind().GroupKind() != gvk.GroupKind() { - return errors.Errorf(errFmtKindOrGroupChanged, gvk, o.GetObjectKind().GroupVersionKind()) + // We also don't check for group changes, as groups can change during + // migrations. + if !gvk.Empty() && o.GetObjectKind().GroupVersionKind().Kind != gvk.Kind { + return errors.Errorf(errFmtKindChanged, gvk, o.GetObjectKind().GroupVersionKind()) } return nil diff --git a/internal/controller/apiextensions/composite/composition_render_test.go b/internal/controller/apiextensions/composite/composition_render_test.go index e4ecfb8a3..a2dd96f24 100644 --- a/internal/controller/apiextensions/composite/composition_render_test.go +++ b/internal/controller/apiextensions/composite/composition_render_test.go @@ -62,38 +62,37 @@ func TestRenderFromJSON(t *testing.T) { err: errors.Wrap(errInvalidChar, errUnmarshalJSON), }, }, - "ExistingGroupChanged": { - reason: "We should return an error if unmarshalling the base template changed the composed resource's group.", + "ExistingKindChanged": { + reason: "We should return an error if unmarshalling the base template changed the composed resource's kind.", args: args{ o: composed.New(composed.FromReference(corev1.ObjectReference{ APIVersion: "example.org/v1", Kind: "Potato", })), - data: []byte(`{"apiVersion": "foo.io/v1", "kind": "Potato"}`), + data: []byte(`{"apiVersion": "example.org/v1", "kind": "Different"}`), }, want: want{ o: composed.New(composed.FromReference(corev1.ObjectReference{ - APIVersion: "foo.io/v1", - Kind: "Potato", + APIVersion: "example.org/v1", + Kind: "Different", })), - err: errors.Errorf(errFmtKindOrGroupChanged, "example.org/v1, Kind=Potato", "foo.io/v1, Kind=Potato"), + err: errors.Errorf(errFmtKindChanged, "example.org/v1, Kind=Potato", "example.org/v1, Kind=Different"), }, }, - "ExistingKindChanged": { - reason: "We should return an error if unmarshalling the base template changed the composed resource's kind.", + "GroupCanChange": { + reason: "We should accept group changes in the base template.", args: args{ o: composed.New(composed.FromReference(corev1.ObjectReference{ APIVersion: "example.org/v1", Kind: "Potato", })), - data: []byte(`{"apiVersion": "example.org/v1", "kind": "Different"}`), + data: []byte(`{"apiVersion": "foo.io/v1", "kind": "Potato"}`), }, want: want{ o: composed.New(composed.FromReference(corev1.ObjectReference{ - APIVersion: "example.org/v1", - Kind: "Different", + APIVersion: "foo.io/v1", + Kind: "Potato", })), - err: errors.Errorf(errFmtKindOrGroupChanged, "example.org/v1, Kind=Potato", "example.org/v1, Kind=Different"), }, }, "VersionCanChange": { From 76fdf072330959e526a3a2435321b6a08829212e Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Thu, 14 Mar 2024 11:00:07 +0300 Subject: [PATCH 057/370] Bump default max reconcile rate and resource limits Signed-off-by: Hasan Turken --- cluster/charts/crossplane/README.md | 4 ++-- cluster/charts/crossplane/values.yaml | 4 ++-- cmd/crossplane/core/core.go | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 5b162359b..95b7bd5ff 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -103,8 +103,8 @@ and their default values. | `registryCaBundleConfig.key` | The ConfigMap key containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates. 
| `""` | | `registryCaBundleConfig.name` | The ConfigMap name containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates. | `""` | | `replicas` | The number of Crossplane pod `replicas` to deploy. | `1` | -| `resourcesCrossplane.limits.cpu` | CPU resource limits for the Crossplane pod. | `"100m"` | -| `resourcesCrossplane.limits.memory` | Memory resource limits for the Crossplane pod. | `"512Mi"` | +| `resourcesCrossplane.limits.cpu` | CPU resource limits for the Crossplane pod. | `"500m"` | +| `resourcesCrossplane.limits.memory` | Memory resource limits for the Crossplane pod. | `"1024Mi"` | | `resourcesCrossplane.requests.cpu` | CPU resource requests for the Crossplane pod. | `"100m"` | | `resourcesCrossplane.requests.memory` | Memory resource requests for the Crossplane pod. | `"256Mi"` | | `resourcesRBACManager.limits.cpu` | CPU resource limits for the RBAC Manager pod. | `"100m"` | diff --git a/cluster/charts/crossplane/values.yaml b/cluster/charts/crossplane/values.yaml index d4166d9db..1a9b7f313 100755 --- a/cluster/charts/crossplane/values.yaml +++ b/cluster/charts/crossplane/values.yaml @@ -85,9 +85,9 @@ priorityClassName: "" resourcesCrossplane: limits: # -- CPU resource limits for the Crossplane pod. - cpu: 100m + cpu: 500m # -- Memory resource limits for the Crossplane pod. - memory: 512Mi + memory: 1024Mi requests: # -- CPU resource requests for the Crossplane pod. cpu: 100m diff --git a/cmd/crossplane/core/core.go b/cmd/crossplane/core/core.go index 358ffafac..d82dd5522 100644 --- a/cmd/crossplane/core/core.go +++ b/cmd/crossplane/core/core.go @@ -93,9 +93,9 @@ type startCommand struct { PackageRuntime string `default:"Deployment" env:"PACKAGE_RUNTIME" helm:"The package runtime to use for packages with a runtime (e.g. Providers and Functions)"` - SyncInterval time.Duration `default:"1h" help:"How often all resources will be double-checked for drift from the desired state." short:"s"` - PollInterval time.Duration `default:"1m" help:"How often individual resources will be checked for drift from the desired state."` - MaxReconcileRate int `default:"10" help:"The global maximum rate per second at which resources may checked for drift from the desired state."` + SyncInterval time.Duration `default:"1h" help:"How often all resources will be double-checked for drift from the desired state." short:"s"` + PollInterval time.Duration `default:"1m" help:"How often individual resources will be checked for drift from the desired state."` + MaxReconcileRate int `default:"100" help:"The global maximum rate per second at which resources may checked for drift from the desired state."` WebhookEnabled bool `default:"true" env:"WEBHOOK_ENABLED" help:"Enable webhook configuration."` From e5b1ed55612c8d1675e41a1a451877c6f6503516 Mon Sep 17 00:00:00 2001 From: Atze de Vries Date: Wed, 13 Mar 2024 10:58:55 +0100 Subject: [PATCH 058/370] feat: Allow setting annotations on service object Allow setting annotations on the kubernetes service object. In GPC we need annotations to expose a service as a NEG which is required in order for the k8s controlplane to reach the webhook. 
This pr will add the ability to add annotations to the service object Signed-off-by: Atze de Vries --- cluster/charts/crossplane/templates/service.yaml | 5 +++++ cluster/charts/crossplane/values.yaml | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/cluster/charts/crossplane/templates/service.yaml b/cluster/charts/crossplane/templates/service.yaml index d4ca47a64..4bb496a39 100644 --- a/cluster/charts/crossplane/templates/service.yaml +++ b/cluster/charts/crossplane/templates/service.yaml @@ -8,6 +8,11 @@ metadata: app: {{ template "crossplane.name" . }} release: {{ .Release.Name }} {{- include "crossplane.labels" . | indent 4 }} + annotations: + {{- with .Values.service.annotations }} + {{- range $key, $value := . }} + {{ $key }}: {{ $value | quote }} + {{- end }} spec: selector: app: {{ template "crossplane.name" . }} diff --git a/cluster/charts/crossplane/values.yaml b/cluster/charts/crossplane/values.yaml index d4166d9db..fdd7d35d2 100755 --- a/cluster/charts/crossplane/values.yaml +++ b/cluster/charts/crossplane/values.yaml @@ -57,6 +57,10 @@ registryCaBundleConfig: # -- The ConfigMap key containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates. key: "" +service: + # -- Configure annotations on the service object. Only enabled when webhooks.enabled = true + annotations: {} + webhooks: # -- Enable webhooks for Crossplane and installed Provider packages. enabled: true From 558dad8b680fa227016782fe08f03fe9d7fee559 Mon Sep 17 00:00:00 2001 From: Atze de Vries Date: Wed, 13 Mar 2024 11:08:21 +0100 Subject: [PATCH 059/370] fix linting and rename to internal standards Signed-off-by: Atze de Vries --- cluster/charts/crossplane/README.md | 1 + cluster/charts/crossplane/templates/service.yaml | 3 ++- cluster/charts/crossplane/values.yaml | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 5b162359b..760784c81 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -119,6 +119,7 @@ and their default values. | `securityContextRBACManager.readOnlyRootFilesystem` | Set the RBAC Manager pod root file system as read-only. | `true` | | `securityContextRBACManager.runAsGroup` | The group ID used by the RBAC Manager pod. | `65532` | | `securityContextRBACManager.runAsUser` | The user ID used by the RBAC Manager pod. | `65532` | +| `service.annotations` | Configure annotations on the service object. Only enabled when webhooks.enabled = true | `{}` | | `serviceAccount.customAnnotations` | Add custom `annotations` to the Crossplane ServiceAccount. | `{}` | | `tolerations` | Add `tolerations` to the Crossplane pod deployment. | `[]` | | `webhooks.enabled` | Enable webhooks for Crossplane and installed Provider packages. | `true` | diff --git a/cluster/charts/crossplane/templates/service.yaml b/cluster/charts/crossplane/templates/service.yaml index 4bb496a39..e4ba77201 100644 --- a/cluster/charts/crossplane/templates/service.yaml +++ b/cluster/charts/crossplane/templates/service.yaml @@ -9,10 +9,11 @@ metadata: release: {{ .Release.Name }} {{- include "crossplane.labels" . | indent 4 }} annotations: - {{- with .Values.service.annotations }} + {{- with .Values.service.customAnnotations }} {{- range $key, $value := . }} {{ $key }}: {{ $value | quote }} {{- end }} + {{- end }} spec: selector: app: {{ template "crossplane.name" . 
}} diff --git a/cluster/charts/crossplane/values.yaml b/cluster/charts/crossplane/values.yaml index fdd7d35d2..a8e5091da 100755 --- a/cluster/charts/crossplane/values.yaml +++ b/cluster/charts/crossplane/values.yaml @@ -59,7 +59,7 @@ registryCaBundleConfig: service: # -- Configure annotations on the service object. Only enabled when webhooks.enabled = true - annotations: {} + customAnnotations: {} webhooks: # -- Enable webhooks for Crossplane and installed Provider packages. From 3a9740c31579f0edee4b2d8501dfaaf78a1734d7 Mon Sep 17 00:00:00 2001 From: Atze de Vries Date: Thu, 14 Mar 2024 12:06:10 +0100 Subject: [PATCH 060/370] fix rendering docs Signed-off-by: Atze de Vries --- cluster/charts/crossplane/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 760784c81..d7d0b5d58 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -119,7 +119,7 @@ and their default values. | `securityContextRBACManager.readOnlyRootFilesystem` | Set the RBAC Manager pod root file system as read-only. | `true` | | `securityContextRBACManager.runAsGroup` | The group ID used by the RBAC Manager pod. | `65532` | | `securityContextRBACManager.runAsUser` | The user ID used by the RBAC Manager pod. | `65532` | -| `service.annotations` | Configure annotations on the service object. Only enabled when webhooks.enabled = true | `{}` | +| `service.customAnnotations` | Configure annotations on the service object. Only enabled when webhooks.enabled = true | `{}` | | `serviceAccount.customAnnotations` | Add custom `annotations` to the Crossplane ServiceAccount. | `{}` | | `tolerations` | Add `tolerations` to the Crossplane pod deployment. | `[]` | | `webhooks.enabled` | Enable webhooks for Crossplane and installed Provider packages. | `true` | From 50da88d44e3013539f5084196b5529844f6f3430 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Tue, 19 Mar 2024 03:42:14 +0300 Subject: [PATCH 061/370] Run Renovate as a Github Action Signed-off-by: Mehmet Enes --- .github/renovate.json5 | 12 +++++++++++- .github/workflows/renovate.yml | 31 +++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/renovate.yml diff --git a/.github/renovate.json5 b/.github/renovate.json5 index d2318534f..c57872db0 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -130,5 +130,15 @@ ], "enabled": false } - ] + ], + postUpgradeTasks: { +// Post-upgrade tasks that are executed before a commit is made by Renovate. 
+ "commands": [ + "make generate" + ], + fileFilters: [ + ["**/*"] + ], + executionMode: "branch", + } } diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml new file mode 100644 index 000000000..ab00962d5 --- /dev/null +++ b/.github/workflows/renovate.yml @@ -0,0 +1,31 @@ +name: Renovate +on: + # Allows manual/automated trigger for debugging purposes + workflow_dispatch: + inputs: + logLevel: + description: "Override default log level" + required: false + default: "info" + type: string + overrideSchedule: + description: "Override all schedules" + required: false + default: "false" + type: string + schedule: + - cron: '0 8 * * *' +jobs: + renovate: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4.1.1 + # Don't waste time starting Renovate if JSON is invalid + - name: Validate Renovate JSON + run: jq type .github/renovate.json + - name: Self-hosted Renovate + uses: renovatebot/github-action@v40.1.5 + with: + configurationFile: .github/renovate.json5 + token: ${{ secrets.RENOVATE_TOKEN }} From d8eed29efe2038f0957730e85afd9e7ef407bbb2 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Wed, 20 Mar 2024 02:06:48 +0300 Subject: [PATCH 062/370] Change Renovate config validator to official one Signed-off-by: Mehmet Enes --- .github/workflows/renovate.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index ab00962d5..4c29bf2b0 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -23,7 +23,7 @@ jobs: uses: actions/checkout@v4.1.1 # Don't waste time starting Renovate if JSON is invalid - name: Validate Renovate JSON - run: jq type .github/renovate.json + run: npx --yes --package renovate -- renovate-config-validator - name: Self-hosted Renovate uses: renovatebot/github-action@v40.1.5 with: From d89835576d6ef9914df95275bc96e2fad2421615 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 21 Mar 2024 08:55:03 +0000 Subject: [PATCH 063/370] fix(deps): update module github.com/docker/docker to v25.0.5+incompatible [security] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f62cc02a5..b1872ae62 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/alecthomas/kong v0.8.1 github.com/bufbuild/buf v1.27.2 github.com/crossplane/crossplane-runtime v1.16.0-rc.1.0.20240226223305-2c81cc6326e5 - github.com/docker/docker v25.0.2+incompatible + github.com/docker/docker v25.0.5+incompatible github.com/docker/go-connections v0.5.0 github.com/emicklei/dot v1.6.1 github.com/go-git/go-billy/v5 v5.5.0 diff --git a/go.sum b/go.sum index fba4a06b2..b7d23c62b 100644 --- a/go.sum +++ b/go.sum @@ -159,8 +159,8 @@ github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1x github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v25.0.2+incompatible h1:/OaKeauroa10K4Nqavw4zlhcDq/WBcPMc5DbjOGgozY= -github.com/docker/docker v25.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker 
v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= From 053bcdb11f41d269e292421346edea6ef6200470 Mon Sep 17 00:00:00 2001 From: Mehmet Enes <94247411+enesonus@users.noreply.github.com> Date: Thu, 21 Mar 2024 12:47:59 +0300 Subject: [PATCH 064/370] Update .github/renovate.json5 Co-authored-by: Philippe Scorsolini Signed-off-by: Mehmet Enes <94247411+enesonus@users.noreply.github.com> --- .github/renovate.json5 | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index c57872db0..0233d7fc6 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -136,9 +136,7 @@ "commands": [ "make generate" ], - fileFilters: [ - ["**/*"] - ], + fileFilters: ["**/*"], executionMode: "branch", } } From cde616afca0109a0e56a924ff5dd6cd00a4afa30 Mon Sep 17 00:00:00 2001 From: Argannor Date: Fri, 22 Mar 2024 18:05:18 +0100 Subject: [PATCH 065/370] feat(helm): add dnsPolicy parameter #5500 Signed-off-by: Argannor --- cluster/charts/crossplane/README.md | 5 ++++- cluster/charts/crossplane/templates/deployment.yaml | 1 + cluster/charts/crossplane/values.yaml | 5 ++++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 95b7bd5ff..622868448 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -63,6 +63,7 @@ including all the custom resources and controllers. The following tables lists the configurable parameters of the Crossplane chart and their default values. + | Parameter | Description | Default | | --- | --- | --- | | `affinity` | Add `affinities` to the Crossplane pod deployment. | `{}` | @@ -71,12 +72,13 @@ and their default values. | `customAnnotations` | Add custom `annotations` to the Crossplane pod deployment. | `{}` | | `customLabels` | Add custom `labels` to the Crossplane pod deployment. | `{}` | | `deploymentStrategy` | The deployment strategy for the Crossplane and RBAC Manager pods. | `"RollingUpdate"` | +| `dnsPolicy` | Specify the `dnsPolicy` to be used by the Crossplane pod. | `"ClusterFirst"` | | `extraEnvVarsCrossplane` | Add custom environmental variables to the Crossplane pod deployment. Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`. | `{}` | | `extraEnvVarsRBACManager` | Add custom environmental variables to the RBAC Manager pod deployment. Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`. | `{}` | | `extraObjects` | To add arbitrary Kubernetes Objects during a Helm Install | `[]` | | `extraVolumeMountsCrossplane` | Add custom `volumeMounts` to the Crossplane pod. | `{}` | | `extraVolumesCrossplane` | Add custom `volumes` to the Crossplane pod. | `{}` | -| `hostNetwork` | Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. | `false` | +| `hostNetwork` | Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. 
Consider setting `dnsPolicy` to `ClusterFirstWithHostNet` | `false` | | `image.pullPolicy` | The image pull policy used for Crossplane and RBAC Manager pods. | `"IfNotPresent"` | | `image.repository` | Repository for the Crossplane pod image. | `"xpkg.upbound.io/crossplane/crossplane"` | | `image.tag` | The Crossplane image tag. Defaults to the value of `appVersion` in `Chart.yaml`. | `""` | @@ -123,6 +125,7 @@ and their default values. | `tolerations` | Add `tolerations` to the Crossplane pod deployment. | `[]` | | `webhooks.enabled` | Enable webhooks for Crossplane and installed Provider packages. | `true` | + ### Command Line You can pass the settings with helm command line parameters. Specify each diff --git a/cluster/charts/crossplane/templates/deployment.yaml b/cluster/charts/crossplane/templates/deployment.yaml index 8bde3f5e4..4413e8730 100644 --- a/cluster/charts/crossplane/templates/deployment.yaml +++ b/cluster/charts/crossplane/templates/deployment.yaml @@ -239,3 +239,4 @@ spec: {{- if .Values.affinity }} affinity: {{ toYaml .Values.affinity | nindent 8 }} {{- end }} + dnsPolicy: {{ .Values.dnsPolicy }} diff --git a/cluster/charts/crossplane/values.yaml b/cluster/charts/crossplane/values.yaml index 1a9b7f313..e572fb993 100755 --- a/cluster/charts/crossplane/values.yaml +++ b/cluster/charts/crossplane/values.yaml @@ -22,9 +22,12 @@ tolerations: [] # -- Add `affinities` to the Crossplane pod deployment. affinity: {} -# -- Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. +# -- Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet` hostNetwork: false +# -- Specify the `dnsPolicy` to be used by the Crossplane pod. +dnsPolicy: "ClusterFirst" + # -- Add custom `labels` to the Crossplane pod deployment. customLabels: {} From 7f5baac7be3b45a740b0e6066da805164b99b023 Mon Sep 17 00:00:00 2001 From: Argannor Date: Fri, 22 Mar 2024 18:07:40 +0100 Subject: [PATCH 066/370] chore(helm): correct punctuation Signed-off-by: Argannor --- cluster/charts/crossplane/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/charts/crossplane/values.yaml b/cluster/charts/crossplane/values.yaml index e572fb993..61ec10ef4 100755 --- a/cluster/charts/crossplane/values.yaml +++ b/cluster/charts/crossplane/values.yaml @@ -22,7 +22,7 @@ tolerations: [] # -- Add `affinities` to the Crossplane pod deployment. affinity: {} -# -- Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet` +# -- Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet`. hostNetwork: false # -- Specify the `dnsPolicy` to be used by the Crossplane pod. 
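A minimal sketch of how the chart values introduced in the patches above (`service.customAnnotations`, `hostNetwork`, and `dnsPolicy`) might be combined in a Helm values override. The NEG annotation key and the choice to enable `hostNetwork` are illustrative assumptions, not taken from these patches:

```yaml
# Hypothetical values override for the Crossplane Helm chart.
# Enabling hostNetwork here is an assumption made for illustration only.
hostNetwork: true
# The chart comments suggest ClusterFirstWithHostNet when hostNetwork is
# enabled, so the pod can still resolve in-cluster service names.
dnsPolicy: ClusterFirstWithHostNet
service:
  # Rendered on the webhook Service only when webhooks.enabled is true.
  # The GKE NEG annotation below is an illustrative example of exposing
  # the webhook Service as a network endpoint group, per the use case
  # described in the commit message.
  customAnnotations:
    cloud.google.com/neg: '{"ingress": true}'
```

Applied with something like `helm upgrade --install crossplane -f override.yaml ...`, these values flow into the Deployment and Service templates shown in the diffs above.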
From 8da7e8a2ed4818c060a24f9437de95c79987110e Mon Sep 17 00:00:00 2001 From: Argannor Date: Fri, 22 Mar 2024 18:09:14 +0100 Subject: [PATCH 067/370] chore(helm): regenerate documentation Signed-off-by: Argannor --- cluster/charts/crossplane/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 622868448..1db07a670 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -78,7 +78,7 @@ and their default values. | `extraObjects` | To add arbitrary Kubernetes Objects during a Helm Install | `[]` | | `extraVolumeMountsCrossplane` | Add custom `volumeMounts` to the Crossplane pod. | `{}` | | `extraVolumesCrossplane` | Add custom `volumes` to the Crossplane pod. | `{}` | -| `hostNetwork` | Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet` | `false` | +| `hostNetwork` | Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet`. | `false` | | `image.pullPolicy` | The image pull policy used for Crossplane and RBAC Manager pods. | `"IfNotPresent"` | | `image.repository` | Repository for the Crossplane pod image. | `"xpkg.upbound.io/crossplane/crossplane"` | | `image.tag` | The Crossplane image tag. Defaults to the value of `appVersion` in `Chart.yaml`. | `""` | From 424fac0d4fc82bc8ae091609b923581a12bfc720 Mon Sep 17 00:00:00 2001 From: majorteach Date: Sat, 23 Mar 2024 17:11:45 +0800 Subject: [PATCH 068/370] chore: remove repetitive words Signed-off-by: majorteach --- design/design-doc-rbac-manager.md | 2 +- test/e2e/manifests/pkg/configuration/private/pull-secret.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/design/design-doc-rbac-manager.md b/design/design-doc-rbac-manager.md index c5df91fb5..59f5669b0 100644 --- a/design/design-doc-rbac-manager.md +++ b/design/design-doc-rbac-manager.md @@ -20,7 +20,7 @@ Crossplane, as a project, consists of three key building blocks: Each provider is a distinct process that is typically deployed as a pod. The API extensions and package manager controllers are part of the 'core' Crossplane controller manager process. The core controller manager is therefore responsible -for _extending Crossplane_. Its controllers add and and remove Custom Resource +for _extending Crossplane_. Its controllers add remove Custom Resource Definitions (CRDs) to and from the API server. The core Crossplane controllers define custom resources (CRs) that represent: diff --git a/test/e2e/manifests/pkg/configuration/private/pull-secret.yaml b/test/e2e/manifests/pkg/configuration/private/pull-secret.yaml index c647aba4e..2494100d9 100644 --- a/test/e2e/manifests/pkg/configuration/private/pull-secret.yaml +++ b/test/e2e/manifests/pkg/configuration/private/pull-secret.yaml @@ -6,7 +6,7 @@ # --docker-username=_json_key \ # --docker-password="$(cat service-account-key.json)" # -# We're okay with having these credentials checked in in plaintext because the +# We're okay with having these credentials checked in plaintext because the # service account they grant access to only has permission to pull from the # xp-install-test Google Artifact Registry, which contains only the empty test # Configuration defined by crossplane.yaml. 
From aeab3da47dc6d73e085810c1d1a5262e9844726c Mon Sep 17 00:00:00 2001 From: Mehmet Enes <94247411+enesonus@users.noreply.github.com> Date: Sat, 23 Mar 2024 18:45:35 +0300 Subject: [PATCH 069/370] Update .github/workflows/renovate.yml Co-authored-by: Philippe Scorsolini Signed-off-by: Mehmet Enes <94247411+enesonus@users.noreply.github.com> --- .github/workflows/renovate.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 4c29bf2b0..ce3904f7c 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -18,6 +18,9 @@ on: jobs: renovate: runs-on: ubuntu-latest + if: | + !github.event.repository.fork && + !github.event.pull_request.head.repo.fork steps: - name: Checkout uses: actions/checkout@v4.1.1 From 4e334e031de455b5af56d69b2eb2606cd50af98e Mon Sep 17 00:00:00 2001 From: Mehmet Enes <94247411+enesonus@users.noreply.github.com> Date: Sat, 23 Mar 2024 18:48:00 +0300 Subject: [PATCH 070/370] Update .github/workflows/renovate.yml Co-authored-by: Philippe Scorsolini Signed-off-by: Mehmet Enes <94247411+enesonus@users.noreply.github.com> --- .github/workflows/renovate.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index ce3904f7c..50debb587 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -29,6 +29,8 @@ jobs: run: npx --yes --package renovate -- renovate-config-validator - name: Self-hosted Renovate uses: renovatebot/github-action@v40.1.5 + env: + RENOVATE_REPOSITORIES: ${{ github.repository }} with: configurationFile: .github/renovate.json5 token: ${{ secrets.RENOVATE_TOKEN }} From 07113d2a880b9fb6125d129a20dcaa0476270626 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Sat, 23 Mar 2024 16:19:27 +0000 Subject: [PATCH 071/370] chore: regenerate README.md Signed-off-by: Philippe Scorsolini --- cluster/charts/crossplane/README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 1db07a670..2fb587f6d 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -63,7 +63,6 @@ including all the custom resources and controllers. The following tables lists the configurable parameters of the Crossplane chart and their default values. - | Parameter | Description | Default | | --- | --- | --- | | `affinity` | Add `affinities` to the Crossplane pod deployment. | `{}` | @@ -125,7 +124,6 @@ and their default values. | `tolerations` | Add `tolerations` to the Crossplane pod deployment. | `[]` | | `webhooks.enabled` | Enable webhooks for Crossplane and installed Provider packages. | `true` | - ### Command Line You can pass the settings with helm command line parameters. Specify each From 835527b3a1b9f80739823d53eb2fe82cee8b66e4 Mon Sep 17 00:00:00 2001 From: Simon Larsen <104437376+simlarsen@users.noreply.github.com> Date: Sun, 24 Mar 2024 13:35:15 +0000 Subject: [PATCH 072/370] Update ADOPTERS.md Signed-off-by: Simon Larsen <104437376+simlarsen@users.noreply.github.com> --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 6738f6975..4a9b8c6ec 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -79,3 +79,4 @@ This list is sorted in the order that organizations were added to it. 
| [DB Systel](https://www.dbsystel.de) | [@gandhiano](htttps://github.com/gandhiano) | Backbone of the Developer Experience Platform of the [Deutsche Bahn](https://deutschebahn.com). Through Crossplane, application developers can easily provision and integrate a panoply of platform services, creating a coherent platform experience. Cloud infrastructure can also be self-serviced, allowing a 100% Gitops infrastructure-as-code approach. Both the K8s API and a developer portal UI ([Backstage](https://backstage.io)) can be used to interact with the Crossplane compositions.| | [Akamai](https://www.akamai.com/) | [@nolancon](https://github.com/nolancon) | We use Crossplane to offer customers [provider-linode](https://github.com/linode/provider-linode), a control plane for Akamai Cloud Computing services based on Linode. We have also used Crossplane to develop [provider-ceph](https://github.com/linode/provider-ceph). Provider Ceph is an object storage control plane for Ceph. It is capable orchestrating up to 200k Managed Resources which represent S3 buckets distributed across multiple Ceph clusters. | | [Variphy](https://www.variphy.com/) | [info@variphy.com](mailto:info@variphy.com) ([@dmvariphy](https://github.com/dmvariphy) [@nick-variphy](https://github.com/nick-variphy) [@dfalter-variphy](https://github.com/dfalter-variphy) [@zach-variphy](https://github.com/zach-variphy)) | We use Crossplane (via [Upbound Cloud](https://www.upbound.io/)) to manage our development and production infrastructure via GitOps. Crossplane also allows us to provide custom APIs for production Variphy applications to dynamically manage external resources, such as [Confluent Cloud](https://www.confluent.io/) Kafka topics. | +| [OneUptime](https://oneuptime.com) | @simlarsen | Builds production and developer environments that power the OneUptime Platform. | From 9067b9f1a40a40b5606e4351fccb40a32303992e Mon Sep 17 00:00:00 2001 From: Jean du Plessis Date: Mon, 25 Mar 2024 10:24:04 +0200 Subject: [PATCH 073/370] Update Upjet SIG Signed-off-by: Jean du Plessis --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1f1df9fbb..788729913 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ find the meetings in the [Community Calendar][community calendar]. - [#sig-observe-only][sig-observe-only-slack] - [#sig-provider-families][sig-provider-families-slack] - [#sig-secret-stores][sig-secret-stores-slack] -- [#sig-upjet-provider-efficiency][sig-upjet-provider-efficiency-slack] +- [#sig-upjet][sig-upjet-slack] ## Adopters @@ -130,4 +130,4 @@ Crossplane is under the Apache 2.0 license. [sig-observe-only-slack]: https://crossplane.slack.com/archives/C04D5988QEA [sig-provider-families-slack]: https://crossplane.slack.com/archives/C056YAQRV16 [sig-secret-stores-slack]: https://crossplane.slack.com/archives/C05BY7DKFV2 -[sig-upjet-provider-efficiency-slack]: https://crossplane.slack.com/archives/C04QLETDJGN +[sig-upjet-slack]: https://crossplane.slack.com/archives/C05T19TB729 From 6c145e96a0c2f88309a9049f288ee1344081c05c Mon Sep 17 00:00:00 2001 From: Matt Field Date: Mon, 25 Mar 2024 14:24:00 +0000 Subject: [PATCH 074/370] Adds Xata entry to ADOPTERS Signed-off-by: Matt Field --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 4a9b8c6ec..5452bd85e 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -80,3 +80,4 @@ This list is sorted in the order that organizations were added to it. 
| [Akamai](https://www.akamai.com/) | [@nolancon](https://github.com/nolancon) | We use Crossplane to offer customers [provider-linode](https://github.com/linode/provider-linode), a control plane for Akamai Cloud Computing services based on Linode. We have also used Crossplane to develop [provider-ceph](https://github.com/linode/provider-ceph). Provider Ceph is an object storage control plane for Ceph. It is capable orchestrating up to 200k Managed Resources which represent S3 buckets distributed across multiple Ceph clusters. | | [Variphy](https://www.variphy.com/) | [info@variphy.com](mailto:info@variphy.com) ([@dmvariphy](https://github.com/dmvariphy) [@nick-variphy](https://github.com/nick-variphy) [@dfalter-variphy](https://github.com/dfalter-variphy) [@zach-variphy](https://github.com/zach-variphy)) | We use Crossplane (via [Upbound Cloud](https://www.upbound.io/)) to manage our development and production infrastructure via GitOps. Crossplane also allows us to provide custom APIs for production Variphy applications to dynamically manage external resources, such as [Confluent Cloud](https://www.confluent.io/) Kafka topics. | | [OneUptime](https://oneuptime.com) | @simlarsen | Builds production and developer environments that power the OneUptime Platform. | +| [Xata](https://xata.io) | [@mattfield](https://github.com/mattfield) [@paulaguijarro](https://github.com/paulaguijarro) | Crossplane manages the RDS Aurora PostgreSQL clusters for our [Dedicated Clusters](https://xata.io/blog/postgres-dedicated-clusters) offering, along with Flux Kustomizations and other reosurces that provision cells of internal [Xata](https://xata.io) services. | From b6211f47ca4b707619a9c251e126c181aac314b3 Mon Sep 17 00:00:00 2001 From: Matt Field Date: Mon, 25 Mar 2024 14:29:34 +0000 Subject: [PATCH 075/370] spelling Signed-off-by: Matt Field --- ADOPTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index 5452bd85e..5640f767c 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -80,4 +80,4 @@ This list is sorted in the order that organizations were added to it. | [Akamai](https://www.akamai.com/) | [@nolancon](https://github.com/nolancon) | We use Crossplane to offer customers [provider-linode](https://github.com/linode/provider-linode), a control plane for Akamai Cloud Computing services based on Linode. We have also used Crossplane to develop [provider-ceph](https://github.com/linode/provider-ceph). Provider Ceph is an object storage control plane for Ceph. It is capable orchestrating up to 200k Managed Resources which represent S3 buckets distributed across multiple Ceph clusters. | | [Variphy](https://www.variphy.com/) | [info@variphy.com](mailto:info@variphy.com) ([@dmvariphy](https://github.com/dmvariphy) [@nick-variphy](https://github.com/nick-variphy) [@dfalter-variphy](https://github.com/dfalter-variphy) [@zach-variphy](https://github.com/zach-variphy)) | We use Crossplane (via [Upbound Cloud](https://www.upbound.io/)) to manage our development and production infrastructure via GitOps. Crossplane also allows us to provide custom APIs for production Variphy applications to dynamically manage external resources, such as [Confluent Cloud](https://www.confluent.io/) Kafka topics. | | [OneUptime](https://oneuptime.com) | @simlarsen | Builds production and developer environments that power the OneUptime Platform. 
| -| [Xata](https://xata.io) | [@mattfield](https://github.com/mattfield) [@paulaguijarro](https://github.com/paulaguijarro) | Crossplane manages the RDS Aurora PostgreSQL clusters for our [Dedicated Clusters](https://xata.io/blog/postgres-dedicated-clusters) offering, along with Flux Kustomizations and other reosurces that provision cells of internal [Xata](https://xata.io) services. | +| [Xata](https://xata.io) | [@mattfield](https://github.com/mattfield) [@paulaguijarro](https://github.com/paulaguijarro) | Crossplane manages the RDS Aurora PostgreSQL clusters for our [Dedicated Clusters](https://xata.io/blog/postgres-dedicated-clusters) offering, along with Flux Kustomizations and other resources that provision cells of internal [Xata](https://xata.io) services. | From 21af307653e608d82c59c89c2629682fb7c177f4 Mon Sep 17 00:00:00 2001 From: Argannor Date: Mon, 25 Mar 2024 16:15:26 +0100 Subject: [PATCH 076/370] feat(helm): set default of dnsPolicy to empty string Signed-off-by: Argannor --- cluster/charts/crossplane/README.md | 2 +- cluster/charts/crossplane/templates/deployment.yaml | 4 +++- cluster/charts/crossplane/values.yaml | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 1db07a670..8c3cde07b 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -72,7 +72,7 @@ and their default values. | `customAnnotations` | Add custom `annotations` to the Crossplane pod deployment. | `{}` | | `customLabels` | Add custom `labels` to the Crossplane pod deployment. | `{}` | | `deploymentStrategy` | The deployment strategy for the Crossplane and RBAC Manager pods. | `"RollingUpdate"` | -| `dnsPolicy` | Specify the `dnsPolicy` to be used by the Crossplane pod. | `"ClusterFirst"` | +| `dnsPolicy` | Specify the `dnsPolicy` to be used by the Crossplane pod. | `""` | | `extraEnvVarsCrossplane` | Add custom environmental variables to the Crossplane pod deployment. Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`. | `{}` | | `extraEnvVarsRBACManager` | Add custom environmental variables to the RBAC Manager pod deployment. Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`. | `{}` | | `extraObjects` | To add arbitrary Kubernetes Objects during a Helm Install | `[]` | diff --git a/cluster/charts/crossplane/templates/deployment.yaml b/cluster/charts/crossplane/templates/deployment.yaml index 4413e8730..d6e31a7a3 100644 --- a/cluster/charts/crossplane/templates/deployment.yaml +++ b/cluster/charts/crossplane/templates/deployment.yaml @@ -239,4 +239,6 @@ spec: {{- if .Values.affinity }} affinity: {{ toYaml .Values.affinity | nindent 8 }} {{- end }} - dnsPolicy: {{ .Values.dnsPolicy }} + {{- with .Values.dnsPolicy }} + dnsPolicy: {{ . }} + {{- end }} diff --git a/cluster/charts/crossplane/values.yaml b/cluster/charts/crossplane/values.yaml index 61ec10ef4..83f3d896d 100755 --- a/cluster/charts/crossplane/values.yaml +++ b/cluster/charts/crossplane/values.yaml @@ -26,7 +26,7 @@ affinity: {} hostNetwork: false # -- Specify the `dnsPolicy` to be used by the Crossplane pod. -dnsPolicy: "ClusterFirst" +dnsPolicy: "" # -- Add custom `labels` to the Crossplane pod deployment. 
customLabels: {} From ea7fb27c93933d992bf5aace82ef56c7dcb54148 Mon Sep 17 00:00:00 2001 From: David Adelowo Date: Mon, 25 Mar 2024 08:59:59 -0600 Subject: [PATCH 077/370] fix: include generatName field in default metadata property Signed-off-by: David Adelowo --- .../apiextensions/v1/composition/patches_test.go | 8 ++++++++ pkg/validation/apiextensions/v1/composition/schema.go | 1 + 2 files changed, 9 insertions(+) diff --git a/pkg/validation/apiextensions/v1/composition/patches_test.go b/pkg/validation/apiextensions/v1/composition/patches_test.go index a35de8cf2..d6d9fbe89 100644 --- a/pkg/validation/apiextensions/v1/composition/patches_test.go +++ b/pkg/validation/apiextensions/v1/composition/patches_test.go @@ -492,6 +492,14 @@ func TestValidateFieldPath(t *testing.T) { schema: &apiextensions.JSONSchemaProps{Properties: map[string]apiextensions.JSONSchemaProps{"metadata": {Type: "object"}}}, }, }, + "AcceptMetadataGenerateName": { + reason: "Should accept metadata.generateName", + want: want{err: nil, fieldType: "string"}, + args: args{ + fieldPath: "metadata.generateName", + schema: &apiextensions.JSONSchemaProps{Properties: map[string]apiextensions.JSONSchemaProps{"metadata": {Type: "object"}}}, + }, + }, "AcceptXPreserveUnknownFieldsInAdditionalProperties": { reason: "Should properly handle x-preserve-unknown-fields even if defined in a nested schema", want: want{err: nil, fieldType: ""}, diff --git a/pkg/validation/apiextensions/v1/composition/schema.go b/pkg/validation/apiextensions/v1/composition/schema.go index bc4d516c3..848fabf8f 100644 --- a/pkg/validation/apiextensions/v1/composition/schema.go +++ b/pkg/validation/apiextensions/v1/composition/schema.go @@ -32,6 +32,7 @@ func defaultMetadataOnly(metadata *apiextensions.JSONSchemaProps) *apiextensions setDefaultProperty(metadata, "name", string(schema.KnownJSONTypeString)) setDefaultProperty(metadata, "namespace", string(schema.KnownJSONTypeString)) setDefaultProperty(metadata, "uid", string(schema.KnownJSONTypeString)) + setDefaultProperty(metadata, "generateName", string(schema.KnownJSONTypeString)) setDefaultLabels(metadata) setDefaultAnnotations(metadata) return metadata From 1f0ed6670bc378fe24353b5245edb2d6bcaf9ef7 Mon Sep 17 00:00:00 2001 From: Pete Lumbis Date: Mon, 25 Mar 2024 16:18:22 -0400 Subject: [PATCH 078/370] kubernetes.io/docs/user-guide/labels -> kubernetes.io/docs/concepts/overview/working-with-objects/labels/ Signed-off-by: Pete Lumbis --- apis/pkg/v1/package_types.go | 2 +- apis/pkg/v1/revision_types.go | 2 +- apis/pkg/v1alpha1/config.go | 2 +- apis/pkg/v1beta1/deployment_runtime_config_types.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/apis/pkg/v1/package_types.go b/apis/pkg/v1/package_types.go index 070080a74..c74c03cdc 100644 --- a/apis/pkg/v1/package_types.go +++ b/apis/pkg/v1/package_types.go @@ -70,7 +70,7 @@ type PackageSpec struct { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. 
- // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional CommonLabels map[string]string `json:"commonLabels,omitempty"` } diff --git a/apis/pkg/v1/revision_types.go b/apis/pkg/v1/revision_types.go index bde9a37b7..c48f38f53 100644 --- a/apis/pkg/v1/revision_types.go +++ b/apis/pkg/v1/revision_types.go @@ -78,7 +78,7 @@ type PackageRevisionSpec struct { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional CommonLabels map[string]string `json:"commonLabels,omitempty"` } diff --git a/apis/pkg/v1alpha1/config.go b/apis/pkg/v1alpha1/config.go index 0a82c8b62..96124415e 100644 --- a/apis/pkg/v1alpha1/config.go +++ b/apis/pkg/v1alpha1/config.go @@ -166,7 +166,7 @@ type PodObjectMeta struct { // labels on the pod, not the pod selector. Labels will be merged // with internal labels used by crossplane, and labels with a // crossplane.io key might be overwritten. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Labels map[string]string `json:"labels,omitempty"` } diff --git a/apis/pkg/v1beta1/deployment_runtime_config_types.go b/apis/pkg/v1beta1/deployment_runtime_config_types.go index a1aeca774..e520747d9 100644 --- a/apis/pkg/v1beta1/deployment_runtime_config_types.go +++ b/apis/pkg/v1beta1/deployment_runtime_config_types.go @@ -38,7 +38,7 @@ type ObjectMeta struct { // (scope and select) objects. Labels will be merged with internal labels // used by crossplane, and labels with a crossplane.io key might be // overwritten. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Labels map[string]string `json:"labels,omitempty"` } From 48a33769afe641e34a862bd70dd5165a1e34a6c4 Mon Sep 17 00:00:00 2001 From: Pete Lumbis Date: Mon, 25 Mar 2024 16:19:42 -0400 Subject: [PATCH 079/370] kubernetes.io/docs/user-guide/annotations -> /kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ Signed-off-by: Pete Lumbis --- apis/pkg/v1alpha1/config.go | 2 +- apis/pkg/v1beta1/deployment_runtime_config_types.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/apis/pkg/v1alpha1/config.go b/apis/pkg/v1alpha1/config.go index 96124415e..515436e18 100644 --- a/apis/pkg/v1alpha1/config.go +++ b/apis/pkg/v1alpha1/config.go @@ -157,7 +157,7 @@ type PodObjectMeta struct { // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. 
- // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ // +optional Annotations map[string]string `json:"annotations,omitempty"` diff --git a/apis/pkg/v1beta1/deployment_runtime_config_types.go b/apis/pkg/v1beta1/deployment_runtime_config_types.go index e520747d9..e07d30546 100644 --- a/apis/pkg/v1beta1/deployment_runtime_config_types.go +++ b/apis/pkg/v1beta1/deployment_runtime_config_types.go @@ -30,7 +30,7 @@ type ObjectMeta struct { // Annotations is an unstructured key value map stored with a resource that // may be set by external tools to store and retrieve arbitrary metadata. // They are not queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ // +optional Annotations map[string]string `json:"annotations,omitempty"` From 3c11feb863a2356885d6b375d1d11a87cf4a4739 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 22:19:42 +0000 Subject: [PATCH 080/370] chore(deps): update gcr.io/distroless/static docker digest to 7e5c6a2 --- cluster/images/crossplane/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/images/crossplane/Dockerfile b/cluster/images/crossplane/Dockerfile index d0a497135..eab6bd90c 100644 --- a/cluster/images/crossplane/Dockerfile +++ b/cluster/images/crossplane/Dockerfile @@ -1,4 +1,4 @@ -FROM gcr.io/distroless/static@sha256:9be3fcc6abeaf985b5ecce59451acbcbb15e7be39472320c538d0d55a0834edc +FROM gcr.io/distroless/static@sha256:7e5c6a2a4ae854242874d36171b31d26e0539c98fc6080f942f16b03e82851ab ARG TARGETOS ARG TARGETARCH From 3a401f8abc7ae6fff7ef310654a9fda9fc347fac Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Tue, 26 Mar 2024 10:13:53 +0000 Subject: [PATCH 081/370] tests(e2e): allow passing options when decoding files Signed-off-by: Philippe Scorsolini --- test/e2e/funcs/feature.go | 50 ++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 29 deletions(-) diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go index be633baf3..da73f7d5d 100644 --- a/test/e2e/funcs/feature.go +++ b/test/e2e/funcs/feature.go @@ -129,11 +129,11 @@ func DeploymentBecomesAvailableWithin(d time.Duration, namespace, name string) f // ResourcesCreatedWithin fails a test if the supplied resources are not found // to exist within the supplied duration. -func ResourcesCreatedWithin(d time.Duration, dir, pattern string) features.Func { +func ResourcesCreatedWithin(d time.Duration, dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { t.Helper() - rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) + rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern, options...) if err != nil { t.Error(err) return ctx @@ -178,11 +178,11 @@ func ResourceCreatedWithin(d time.Duration, o k8s.Object) features.Func { // ResourcesDeletedWithin fails a test if the supplied resources are not deleted // within the supplied duration. 
-func ResourcesDeletedWithin(d time.Duration, dir, pattern string) features.Func { +func ResourcesDeletedWithin(d time.Duration, dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { t.Helper() - rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) + rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern, options...) if err != nil { t.Error(err) return ctx @@ -327,11 +327,11 @@ var NotFound = notFound{} //nolint:gochecknoglobals // We treat this as a consta // ResourcesHaveFieldValueWithin fails a test if the supplied resources do not // have the supplied value at the supplied field path within the supplied // duration. The supplied 'want' value must cmp.Equal the actual value. -func ResourcesHaveFieldValueWithin(d time.Duration, dir, pattern, path string, want any) features.Func { +func ResourcesHaveFieldValueWithin(d time.Duration, dir, pattern, path string, want any, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { t.Helper() - rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) + rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern, options...) if err != nil { t.Error(err) return ctx @@ -452,7 +452,7 @@ type claimCtxKey struct{} // ApplyClaim applies the claim stored in the given folder and file // and stores it in the test context for later retrival if needed. -func ApplyClaim(manager, dir, cm string) features.Func { +func ApplyClaim(manager, dir, cm string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { t.Helper() @@ -464,7 +464,7 @@ func ApplyClaim(manager, dir, cm string) features.Func { return ctx } - objs, err := decoder.DecodeAllFiles(ctx, dfs, cm) + objs, err := decoder.DecodeAllFiles(ctx, dfs, cm, options...) if err != nil { t.Error(err) return ctx @@ -490,15 +490,7 @@ func ApplyClaim(manager, dir, cm string) features.Func { // SetAnnotationMutateOption returns a DecodeOption that sets the supplied // annotation on the decoded object. func SetAnnotationMutateOption(key, value string) decoder.DecodeOption { - return decoder.MutateOption(func(o k8s.Object) error { - a := o.GetAnnotations() - if a == nil { - a = map[string]string{} - } - a[key] = value - o.SetAnnotations(a) - return nil - }) + return decoder.MutateAnnotations(map[string]string{key: value}) } // ResourcesFailToApply applies all manifests under the supplied directory that @@ -506,13 +498,13 @@ func SetAnnotationMutateOption(key, value string) decoder.DecodeOption { // fields are managed by the supplied field manager. It fails the test if any // supplied resource _can_ be applied successfully - use it to test that the API // server should reject a resource. -func ResourcesFailToApply(manager, dir, pattern string) features.Func { +func ResourcesFailToApply(manager, dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { t.Helper() dfs := os.DirFS(dir) - if err := decoder.DecodeEachFile(ctx, dfs, pattern, ApplyHandler(c.Client().Resources(), manager)); err == nil { + if err := decoder.DecodeEachFile(ctx, dfs, pattern, ApplyHandler(c.Client().Resources(), manager), options...); err == nil { // TODO(negz): Ideally we'd say which one. 
t.Error("Resource applied successfully, but should have failed") return ctx @@ -548,13 +540,13 @@ func ApplyHandler(r *resources.Resources, manager string, osh ...onSuccessHandle // DeleteResources deletes (from the environment) all resources defined by the // manifests under the supplied directory that match the supplied glob pattern // (e.g. *.yaml). -func DeleteResources(dir, pattern string) features.Func { +func DeleteResources(dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { t.Helper() dfs := os.DirFS(dir) - if err := decoder.DecodeEachFile(ctx, dfs, pattern, decoder.DeleteHandler(c.Client().Resources())); err != nil { + if err := decoder.DecodeEachFile(ctx, dfs, pattern, decoder.DeleteHandler(c.Client().Resources()), options...); err != nil { t.Fatal(err) return ctx } @@ -649,13 +641,13 @@ func CompositeUnderTestMustNotChangeWithin(d time.Duration) features.Func { // CompositeResourceMustMatchWithin assert that a composite referred by the given file // must be matched by the given function within the given timeout. -func CompositeResourceMustMatchWithin(d time.Duration, dir, claimFile string, match func(xr *composite.Unstructured) bool) features.Func { +func CompositeResourceMustMatchWithin(d time.Duration, dir, claimFile string, match func(xr *composite.Unstructured) bool, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { t.Helper() cm := &claim.Unstructured{} - if err := decoder.DecodeFile(os.DirFS(dir), claimFile, cm); err != nil { + if err := decoder.DecodeFile(os.DirFS(dir), claimFile, cm, options...); err != nil { t.Error(err) return ctx } @@ -701,13 +693,13 @@ func CompositeResourceMustMatchWithin(d time.Duration, dir, claimFile string, ma // CompositeResourceHasFieldValueWithin asserts that the XR referred to by the // claim in the given file has the specified value at the specified path within // the specified time. -func CompositeResourceHasFieldValueWithin(d time.Duration, dir, claimFile, path string, want any) features.Func { +func CompositeResourceHasFieldValueWithin(d time.Duration, dir, claimFile, path string, want any, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { t.Helper() cm := &claim.Unstructured{} - if err := decoder.DecodeFile(os.DirFS(dir), claimFile, cm); err != nil { + if err := decoder.DecodeFile(os.DirFS(dir), claimFile, cm, options...); err != nil { t.Error(err) return ctx } @@ -772,12 +764,12 @@ func CompositeResourceHasFieldValueWithin(d time.Duration, dir, claimFile, path // ComposedResourcesHaveFieldValueWithin fails a test if the composed // resources created by the claim does not have the supplied value at the // supplied path within the supplied duration. -func ComposedResourcesHaveFieldValueWithin(d time.Duration, dir, file, path string, want any, filter func(object k8s.Object) bool) features.Func { //nolint:gocognit // Not too much over. +func ComposedResourcesHaveFieldValueWithin(d time.Duration, dir, file, path string, want any, filter func(object k8s.Object) bool, options ...decoder.DecodeOption) features.Func { //nolint:gocognit // Not too much over. 
return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { t.Helper() cm := &claim.Unstructured{} - if err := decoder.DecodeFile(os.DirFS(dir), file, cm); err != nil { + if err := decoder.DecodeFile(os.DirFS(dir), file, cm, options...); err != nil { t.Error(err) return ctx } @@ -1008,13 +1000,13 @@ func LogResources(list k8s.ObjectList, listOptions ...resources.ListOption) feat // defined by the manifests under the supplied directory that match the supplied // glob pattern (e.g. *.yaml) and verifies that they are blocked by the usage // webhook. -func DeletionBlockedByUsageWebhook(dir, pattern string) features.Func { +func DeletionBlockedByUsageWebhook(dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { t.Helper() dfs := os.DirFS(dir) - err := decoder.DecodeEachFile(ctx, dfs, pattern, decoder.DeleteHandler(c.Client().Resources())) + err := decoder.DecodeEachFile(ctx, dfs, pattern, decoder.DeleteHandler(c.Client().Resources()), options...) if err == nil { t.Fatal("expected the usage webhook to deny the request but deletion succeeded") return ctx From 7345ff61618abdf6e2c015b64452ea92610dfda2 Mon Sep 17 00:00:00 2001 From: Pete Lumbis Date: Mon, 25 Mar 2024 16:21:36 -0400 Subject: [PATCH 082/370] make reviewable output Signed-off-by: Pete Lumbis --- .../fn/proto/v1beta1/run_function.pb.go | 2 +- apis/pkg/v1alpha1/config.go | 2 +- .../pkg.crossplane.io_configurationrevisions.yaml | 2 +- cluster/crds/pkg.crossplane.io_configurations.yaml | 2 +- .../crds/pkg.crossplane.io_controllerconfigs.yaml | 6 +++--- .../pkg.crossplane.io_deploymentruntimeconfigs.yaml | 12 ++++++------ .../crds/pkg.crossplane.io_functionrevisions.yaml | 2 +- cluster/crds/pkg.crossplane.io_functions.yaml | 2 +- .../crds/pkg.crossplane.io_providerrevisions.yaml | 2 +- cluster/crds/pkg.crossplane.io_providers.yaml | 2 +- 10 files changed, 17 insertions(+), 17 deletions(-) diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go index d68823ae6..ee4b7352a 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: apiextensions/fn/proto/v1beta1/run_function.proto diff --git a/apis/pkg/v1alpha1/config.go b/apis/pkg/v1alpha1/config.go index 515436e18..f178f029a 100644 --- a/apis/pkg/v1alpha1/config.go +++ b/apis/pkg/v1alpha1/config.go @@ -107,7 +107,7 @@ type ControllerConfigSpec struct { // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. - // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + // More info: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md // This is a beta feature as of Kubernetes v1.14. 
// +optional RuntimeClassName *string `json:"runtimeClassName,omitempty"` diff --git a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml index 646282639..97522ce26 100644 --- a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml @@ -70,7 +70,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object desiredState: description: DesiredState of the PackageRevision. Can be either Active diff --git a/cluster/crds/pkg.crossplane.io_configurations.yaml b/cluster/crds/pkg.crossplane.io_configurations.yaml index b9a43d5cc..ab2227a47 100644 --- a/cluster/crds/pkg.crossplane.io_configurations.yaml +++ b/cluster/crds/pkg.crossplane.io_configurations.yaml @@ -64,7 +64,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object ignoreCrossplaneConstraints: default: false diff --git a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml index 5e55813c4..4f326f738 100644 --- a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml @@ -1157,7 +1157,7 @@ spec: Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations + More info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ type: object labels: additionalProperties: @@ -1168,7 +1168,7 @@ spec: labels on the pod, not the pod selector. Labels will be merged with internal labels used by crossplane, and labels with a crossplane.io key might be overwritten. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object type: object nodeName: @@ -1482,7 +1482,7 @@ spec: to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. - More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + More info: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md This is a beta feature as of Kubernetes v1.14. type: string securityContext: diff --git a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml index d955677f9..4b8e009cd 100644 --- a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -66,7 +66,7 @@ spec: Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations + More info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ type: object labels: additionalProperties: @@ -76,7 +76,7 @@ spec: (scope and select) objects. Labels will be merged with internal labels used by crossplane, and labels with a crossplane.io key might be overwritten. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object name: description: Name is the name of the object. @@ -7936,7 +7936,7 @@ spec: Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations + More info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ type: object labels: additionalProperties: @@ -7946,7 +7946,7 @@ spec: (scope and select) objects. Labels will be merged with internal labels used by crossplane, and labels with a crossplane.io key might be overwritten. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object name: description: Name is the name of the object. @@ -7967,7 +7967,7 @@ spec: Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations + More info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ type: object labels: additionalProperties: @@ -7977,7 +7977,7 @@ spec: (scope and select) objects. Labels will be merged with internal labels used by crossplane, and labels with a crossplane.io key might be overwritten. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object name: description: Name is the name of the object. diff --git a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml index 6b269fd36..e44ccc751 100644 --- a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -70,7 +70,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object controllerConfigRef: description: |- diff --git a/cluster/crds/pkg.crossplane.io_functions.yaml b/cluster/crds/pkg.crossplane.io_functions.yaml index 0753e10e2..96c3beaee 100644 --- a/cluster/crds/pkg.crossplane.io_functions.yaml +++ b/cluster/crds/pkg.crossplane.io_functions.yaml @@ -62,7 +62,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
- More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object controllerConfigRef: description: |- diff --git a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml index a29d6c6b0..284679ed9 100644 --- a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml @@ -70,7 +70,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object controllerConfigRef: description: |- diff --git a/cluster/crds/pkg.crossplane.io_providers.yaml b/cluster/crds/pkg.crossplane.io_providers.yaml index da22951e9..8005b1bd9 100644 --- a/cluster/crds/pkg.crossplane.io_providers.yaml +++ b/cluster/crds/pkg.crossplane.io_providers.yaml @@ -63,7 +63,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object controllerConfigRef: description: |- From d26730677dbea4a4861c24176d5c5f460ad38a89 Mon Sep 17 00:00:00 2001 From: Pete Lumbis Date: Tue, 26 Mar 2024 06:53:04 -0400 Subject: [PATCH 083/370] make reviewable output Signed-off-by: Pete Lumbis --- apis/apiextensions/fn/proto/v1beta1/run_function.pb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go index ee4b7352a..d68823ae6 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.31.0 // protoc (unknown) // source: apiextensions/fn/proto/v1beta1/run_function.proto From 20f9d8d352c269ea7a561360138dbff77621aa81 Mon Sep 17 00:00:00 2001 From: Matt Field Date: Tue, 26 Mar 2024 10:54:55 +0000 Subject: [PATCH 084/370] Tweaks description to include mention of dev, staging, and prodution Signed-off-by: Matt Field --- ADOPTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index 5640f767c..c0cc5876e 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -80,4 +80,4 @@ This list is sorted in the order that organizations were added to it. | [Akamai](https://www.akamai.com/) | [@nolancon](https://github.com/nolancon) | We use Crossplane to offer customers [provider-linode](https://github.com/linode/provider-linode), a control plane for Akamai Cloud Computing services based on Linode. We have also used Crossplane to develop [provider-ceph](https://github.com/linode/provider-ceph). Provider Ceph is an object storage control plane for Ceph. It is capable orchestrating up to 200k Managed Resources which represent S3 buckets distributed across multiple Ceph clusters. 
| | [Variphy](https://www.variphy.com/) | [info@variphy.com](mailto:info@variphy.com) ([@dmvariphy](https://github.com/dmvariphy) [@nick-variphy](https://github.com/nick-variphy) [@dfalter-variphy](https://github.com/dfalter-variphy) [@zach-variphy](https://github.com/zach-variphy)) | We use Crossplane (via [Upbound Cloud](https://www.upbound.io/)) to manage our development and production infrastructure via GitOps. Crossplane also allows us to provide custom APIs for production Variphy applications to dynamically manage external resources, such as [Confluent Cloud](https://www.confluent.io/) Kafka topics. | | [OneUptime](https://oneuptime.com) | @simlarsen | Builds production and developer environments that power the OneUptime Platform. | -| [Xata](https://xata.io) | [@mattfield](https://github.com/mattfield) [@paulaguijarro](https://github.com/paulaguijarro) | Crossplane manages the RDS Aurora PostgreSQL clusters for our [Dedicated Clusters](https://xata.io/blog/postgres-dedicated-clusters) offering, along with Flux Kustomizations and other resources that provision cells of internal [Xata](https://xata.io) services. | +| [Xata](https://xata.io) | [@mattfield](https://github.com/mattfield) [@paulaguijarro](https://github.com/paulaguijarro) | Crossplane manages the dev, staging, and production RDS Aurora PostgreSQL clusters for our [Dedicated Clusters](https://xata.io/blog/postgres-dedicated-clusters) offering, along with Flux Kustomizations and other resources that provision cells of internal [Xata](https://xata.io) services. | From b49f2da4685c5a7e5bea3a16e4db0fc8896ccde2 Mon Sep 17 00:00:00 2001 From: Maxence Boutet <52334444+mboutet@users.noreply.github.com> Date: Thu, 28 Mar 2024 08:55:16 -0400 Subject: [PATCH 085/370] fix: helm chart: explicitly set divisor in rbac-manager containers resources Signed-off-by: Maxence Boutet <52334444+mboutet@users.noreply.github.com> --- .../charts/crossplane/templates/rbac-manager-deployment.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml b/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml index c94915d9c..aaa5a71ab 100644 --- a/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml +++ b/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml @@ -64,11 +64,13 @@ spec: resourceFieldRef: containerName: {{ .Chart.Name }}-init resource: limits.cpu + divisor: "1" - name: GOMEMLIMIT valueFrom: resourceFieldRef: containerName: {{ .Chart.Name }}-init resource: limits.memory + divisor: "1" containers: - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}" args: @@ -97,11 +99,13 @@ spec: resourceFieldRef: containerName: {{ .Chart.Name }} resource: limits.cpu + divisor: "1" - name: GOMEMLIMIT valueFrom: resourceFieldRef: containerName: {{ .Chart.Name }} resource: limits.memory + divisor: "1" - name: LEADER_ELECTION value: "{{ .Values.rbacManager.leaderElection }}" {{- range $key, $value := .Values.extraEnvVarsRBACManager }} From a07ec8f442d6c9fea7fa1b5ac7814811307f76e3 Mon Sep 17 00:00:00 2001 From: Pete Lumbis Date: Thu, 28 Mar 2024 15:12:14 -0400 Subject: [PATCH 086/370] Expand and improve docstrings to comply with Vale Signed-off-by: Pete Lumbis --- .../v1/composition_revision_types.go | 7 +++++-- apis/apiextensions/v1/composition_types.go | 6 +++++- apis/apiextensions/v1/xrd_types.go | 8 +++++--- .../v1alpha1/environment_config_types.go | 6 +++++- apis/apiextensions/v1alpha1/usage_types.go | 6 
++++++ .../zz_generated.composition_revision_types.go | 7 +++++-- apis/pkg/v1/configuration_types.go | 14 ++++++++++++-- apis/pkg/v1/provider_types.go | 12 ++++++++++-- apis/pkg/v1alpha1/config.go | 12 +++++++++--- .../v1beta1/deployment_runtime_config_types.go | 9 +++++---- apis/pkg/v1beta1/function_types.go | 16 ++++++++++++++-- apis/secrets/v1alpha1/storeconfig_types.go | 3 ++- ...ossplane.io_compositeresourcedefinitions.yaml | 9 ++++++--- ...sions.crossplane.io_compositionrevisions.yaml | 16 ++++++++++++---- ...apiextensions.crossplane.io_compositions.yaml | 8 +++++++- ...ensions.crossplane.io_environmentconfigs.yaml | 9 +++++++-- .../crds/apiextensions.crossplane.io_usages.yaml | 12 ++++++++++-- ...pkg.crossplane.io_configurationrevisions.yaml | 8 +++++++- .../crds/pkg.crossplane.io_configurations.yaml | 12 ++++++++++-- .../pkg.crossplane.io_controllerconfigs.yaml | 14 +++++++++++--- ...g.crossplane.io_deploymentruntimeconfigs.yaml | 10 ++++++---- .../pkg.crossplane.io_functionrevisions.yaml | 8 +++++++- cluster/crds/pkg.crossplane.io_functions.yaml | 14 ++++++++++++-- .../pkg.crossplane.io_providerrevisions.yaml | 8 +++++++- cluster/crds/pkg.crossplane.io_providers.yaml | 8 +++++++- .../crds/secrets.crossplane.io_storeconfigs.yaml | 5 +++-- 26 files changed, 195 insertions(+), 52 deletions(-) diff --git a/apis/apiextensions/v1/composition_revision_types.go b/apis/apiextensions/v1/composition_revision_types.go index 83fbf46de..f62eb2825 100644 --- a/apis/apiextensions/v1/composition_revision_types.go +++ b/apis/apiextensions/v1/composition_revision_types.go @@ -133,8 +133,11 @@ type CompositionRevisionStatus struct { // +genclient // +genclient:nonNamespaced -// A CompositionRevision represents a revision in time of a Composition. -// Revisions are created by Crossplane; they should be treated as immutable. +// A CompositionRevision represents a revision of a Composition. Crossplane +// creates new revisions when there are changes to the Composition. +// +// Crossplane creates and manages CompositionRevisions. Composition Revisions +// aren't designed for user changes. // +kubebuilder:printcolumn:name="REVISION",type="string",JSONPath=".spec.revision" // +kubebuilder:printcolumn:name="XR-KIND",type="string",JSONPath=".spec.compositeTypeRef.kind" // +kubebuilder:printcolumn:name="XR-APIVERSION",type="string",JSONPath=".spec.compositeTypeRef.apiVersion" diff --git a/apis/apiextensions/v1/composition_types.go b/apis/apiextensions/v1/composition_types.go index 597f12090..2d77cccb6 100644 --- a/apis/apiextensions/v1/composition_types.go +++ b/apis/apiextensions/v1/composition_types.go @@ -109,7 +109,11 @@ type CompositionSpec struct { // +genclient // +genclient:nonNamespaced -// A Composition specifies how a composite resource should be composed. +// A Composition defines a template for creating multiple managed resources into +// a single Kubernetes object. +// +// Read the Crossplane documentation for +// [more information about Compositions](https://docs.crossplane.io/latest/concepts/compositions). 
// +kubebuilder:printcolumn:name="XR-KIND",type="string",JSONPath=".spec.compositeTypeRef.kind" // +kubebuilder:printcolumn:name="XR-APIVERSION",type="string",JSONPath=".spec.compositeTypeRef.apiVersion" // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" diff --git a/apis/apiextensions/v1/xrd_types.go b/apis/apiextensions/v1/xrd_types.go index 2a0415d36..ce4f0c61b 100644 --- a/apis/apiextensions/v1/xrd_types.go +++ b/apis/apiextensions/v1/xrd_types.go @@ -212,9 +212,11 @@ type CompositeResourceDefinitionControllerStatus struct { // +genclient // +genclient:nonNamespaced -// A CompositeResourceDefinition defines a new kind of composite infrastructure -// resource. The new resource is composed of other composite or managed -// infrastructure resources. +// A CompositeResourceDefinition defines the schema for a new custom Kubernetes +// API. +// +// Read the Crossplane documentation for +// [more information about CustomResourceDefinitions](https://docs.crossplane.io/latest/concepts/composite-resource-definitions). // +kubebuilder:printcolumn:name="ESTABLISHED",type="string",JSONPath=".status.conditions[?(@.type=='Established')].status" // +kubebuilder:printcolumn:name="OFFERED",type="string",JSONPath=".status.conditions[?(@.type=='Offered')].status" // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" diff --git a/apis/apiextensions/v1alpha1/environment_config_types.go b/apis/apiextensions/v1alpha1/environment_config_types.go index 217b65006..d76a231f3 100644 --- a/apis/apiextensions/v1alpha1/environment_config_types.go +++ b/apis/apiextensions/v1alpha1/environment_config_types.go @@ -26,7 +26,11 @@ import ( // +genclient // +genclient:nonNamespaced -// A EnvironmentConfig contains a set of arbitrary, unstructured values. +// An EnvironmentConfig contains user-defined unstructured values for +// use in a Composition. +// +// Read the Crossplane documentation for +// [more information about EnvironmentConfigs](https://docs.crossplane.io/latest/concepts/environment-configs). // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:resource:scope=Cluster,categories=crossplane,shortName=envcfg type EnvironmentConfig struct { diff --git a/apis/apiextensions/v1alpha1/usage_types.go b/apis/apiextensions/v1alpha1/usage_types.go index 1cb1ee0e8..f1d1370e3 100644 --- a/apis/apiextensions/v1alpha1/usage_types.go +++ b/apis/apiextensions/v1alpha1/usage_types.go @@ -79,6 +79,12 @@ type UsageStatus struct { } // A Usage defines a deletion blocking relationship between two resources. +// +// Usages prevent accidental deletion of a single resource or deletion of +// resources with dependent resources. +// +// Read the Crossplane documentation for +// [more information about Compositions](https://docs.crossplane.io/latest/concepts/usages). 
// +kubebuilder:object:root=true // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="DETAILS",type="string",JSONPath=".metadata.annotations.crossplane\\.io/usage-details" diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go index 3415ce384..000792f80 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go @@ -134,8 +134,11 @@ type CompositionRevisionStatus struct { // +genclient // +genclient:nonNamespaced -// A CompositionRevision represents a revision in time of a Composition. -// Revisions are created by Crossplane; they should be treated as immutable. +// A CompositionRevision represents a revision of a Composition. Crossplane +// creates new revisions when there are changes to the Composition. +// +// Crossplane creates and manages CompositionRevisions. Composition Revisions +// aren't designed for user changes. // +kubebuilder:printcolumn:name="REVISION",type="string",JSONPath=".spec.revision" // +kubebuilder:printcolumn:name="XR-KIND",type="string",JSONPath=".spec.compositeTypeRef.kind" // +kubebuilder:printcolumn:name="XR-APIVERSION",type="string",JSONPath=".spec.compositeTypeRef.apiVersion" diff --git a/apis/pkg/v1/configuration_types.go b/apis/pkg/v1/configuration_types.go index 5d30cdd30..f216fa13b 100644 --- a/apis/pkg/v1/configuration_types.go +++ b/apis/pkg/v1/configuration_types.go @@ -26,7 +26,13 @@ import ( // +genclient // +genclient:nonNamespaced -// Configuration is the CRD type for a request to add a configuration to Crossplane. +// A Configuration installs OCI "Crossplane package" images into Crossplane. +// +// The Configuration kind defines the Configuration image and settings +// Crossplane uses. +// +// Read the Crossplane documentation for +// [more information about Configuration packages](https://docs.crossplane.io/latest/concepts/packages). // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="INSTALLED",type="string",JSONPath=".status.conditions[?(@.type=='Installed')].status" @@ -67,7 +73,11 @@ type ConfigurationList struct { // +genclient // +genclient:nonNamespaced -// A ConfigurationRevision that has been added to Crossplane. +// A ConfigurationRevision represents a revision of a Configuration. Crossplane +// creates new revisions when there are changes to a Configuration. +// +// Crossplane creates and manages ConfigurationRevision. Configuration Revisions +// aren't designed for user changes. // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" diff --git a/apis/pkg/v1/provider_types.go b/apis/pkg/v1/provider_types.go index cd043d972..41b892503 100644 --- a/apis/pkg/v1/provider_types.go +++ b/apis/pkg/v1/provider_types.go @@ -26,7 +26,11 @@ import ( // +genclient // +genclient:nonNamespaced -// Provider is the CRD type for a request to add a provider to Crossplane. +// A Provider installs the Provider package, providing the CRDs representing +// external resources. +// +// Read the Crossplane documentation for +// [more information about Providers](https://docs.crossplane.io/latest/concepts/providers). 
// +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="INSTALLED",type="string",JSONPath=".status.conditions[?(@.type=='Installed')].status" @@ -74,7 +78,11 @@ type ProviderRevisionSpec struct { // +genclient // +genclient:nonNamespaced -// A ProviderRevision that has been added to Crossplane. +// A ProviderRevision represents a revision of a Provider. Crossplane +// creates new revisions when there are changes to a Provider. +// +// Crossplane creates and manages ProviderRevisions. Provider Revisions +// aren't designed for user changes. // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" diff --git a/apis/pkg/v1alpha1/config.go b/apis/pkg/v1alpha1/config.go index f178f029a..68c961072 100644 --- a/apis/pkg/v1alpha1/config.go +++ b/apis/pkg/v1alpha1/config.go @@ -175,9 +175,15 @@ type PodObjectMeta struct { // +genclient // +genclient:nonNamespaced -// ControllerConfig is the CRD type for a packaged controller configuration. -// Deprecated: This API is replaced by DeploymentRuntimeConfig, and is scheduled -// to be removed in a future release. See the design doc for more details: +// A ControllerConfig applies settings to controllers like Provider pods. +// +// _Note:_ Crossplane deprecated this API. Use the +// [DeploymentRuntimeConfig](https://docs.crossplane.io/latest/concepts/providers#runtime-configuration) +// instead. +// +// Read the +// [Package Runtime Configuration](https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md) +// design document for more details. // https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:resource:scope=Cluster diff --git a/apis/pkg/v1beta1/deployment_runtime_config_types.go b/apis/pkg/v1beta1/deployment_runtime_config_types.go index e07d30546..b1f33fe11 100644 --- a/apis/pkg/v1beta1/deployment_runtime_config_types.go +++ b/apis/pkg/v1beta1/deployment_runtime_config_types.go @@ -87,10 +87,11 @@ type DeploymentRuntimeConfigSpec struct { // +genclient // +genclient:nonNamespaced -// A DeploymentRuntimeConfig is used to configure the package runtime when -// the package uses a runtime and the package manager is running with -// --package-runtime=Deployment (the default). See the following design doc for -// more details:https://github.com/crossplane/crossplane/blob/91edeae3fcac96c6c8a1759a723981eea4bb77e4/design/one-pager-package-runtime-config.md#migration-from-controllerconfig +// The DeploymentRuntimeConfig provides settings for the Kubernetes Deployment +// of a Provider or composition function package. +// +// Read the Crossplane documentation for +// [more information about DeploymentRuntimeConfigs](https://docs.crossplane.io/latest/concepts/providers/#runtime-configuration). 
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:resource:scope=Cluster,categories={crossplane} type DeploymentRuntimeConfig struct { diff --git a/apis/pkg/v1beta1/function_types.go b/apis/pkg/v1beta1/function_types.go index 89b5eb6f7..c38be476f 100644 --- a/apis/pkg/v1beta1/function_types.go +++ b/apis/pkg/v1beta1/function_types.go @@ -34,7 +34,15 @@ import ( // +genclient // +genclient:nonNamespaced -// Function is the CRD type for a request to deploy a long-running Function. +// A Function kind represents a +// [Composition Function](https://docs.crossplane.io/latest/concepts/composition-functions/) +// in Crossplane. +// +// Composition Functions are custom programs that interact with Crossplane +// resources, providers and other components. +// +// Read the Crossplane documentation for +// [more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions). // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="INSTALLED",type="string",JSONPath=".status.conditions[?(@.type=='Installed')].status" @@ -82,7 +90,11 @@ type FunctionRevisionSpec struct { // +genclient // +genclient:nonNamespaced -// A FunctionRevision that has been added to Crossplane. +// A FunctionRevision represents a revision of a Function. Crossplane +// creates new revisions when there are changes to the Function. +// +// Crossplane creates and manages FunctionRevisions. Function Revisions +// aren't designed for user changes. // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" diff --git a/apis/secrets/v1alpha1/storeconfig_types.go b/apis/secrets/v1alpha1/storeconfig_types.go index ba364c1d4..33f3b3b1d 100644 --- a/apis/secrets/v1alpha1/storeconfig_types.go +++ b/apis/secrets/v1alpha1/storeconfig_types.go @@ -29,7 +29,8 @@ type StoreConfigSpec struct { // +kubebuilder:object:root=true -// A StoreConfig configures how Crossplane controllers should store connection details. +// A StoreConfig configures how Crossplane controllers should store connection +// details in an external secret store. // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="TYPE",type="string",JSONPath=".spec.type" // +kubebuilder:printcolumn:name="DEFAULT-SCOPE",type="string",JSONPath=".spec.defaultScope" diff --git a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml index 602690af6..8bb056fd0 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml @@ -32,9 +32,12 @@ spec: schema: openAPIV3Schema: description: |- - A CompositeResourceDefinition defines a new kind of composite infrastructure - resource. The new resource is composed of other composite or managed - infrastructure resources. + A CompositeResourceDefinition defines the schema for a new custom Kubernetes + API. + + + Read the Crossplane documentation for + [more information about CustomResourceDefinitions](https://docs.crossplane.io/latest/concepts/composite-resource-definitions). 
properties: apiVersion: description: |- diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index e21337683..d13307d8c 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -34,8 +34,12 @@ spec: schema: openAPIV3Schema: description: |- - A CompositionRevision represents a revision in time of a Composition. - Revisions are created by Crossplane; they should be treated as immutable. + A CompositionRevision represents a revision of a Composition. Crossplane + creates new revisions when there are changes to the Composition. + + + Crossplane creates and manages CompositionRevisions. Composition Revisions + aren't designed for user changes. properties: apiVersion: description: |- @@ -1610,8 +1614,12 @@ spec: schema: openAPIV3Schema: description: |- - A CompositionRevision represents a revision in time of a Composition. - Revisions are created by Crossplane; they should be treated as immutable. + A CompositionRevision represents a revision of a Composition. Crossplane + creates new revisions when there are changes to the Composition. + + + Crossplane creates and manages CompositionRevisions. Composition Revisions + aren't designed for user changes. properties: apiVersion: description: |- diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index 4f0b43972..2be9b24ba 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -30,7 +30,13 @@ spec: name: v1 schema: openAPIV3Schema: - description: A Composition specifies how a composite resource should be composed. + description: |- + A Composition defines a template for creating multiple managed resources into + a single Kubernetes object. + + + Read the Crossplane documentation for + [more information about Compositions](https://docs.crossplane.io/latest/concepts/compositions). properties: apiVersion: description: |- diff --git a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml index 9a703e214..1d80d90b2 100644 --- a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml +++ b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml @@ -24,8 +24,13 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: A EnvironmentConfig contains a set of arbitrary, unstructured - values. + description: |- + An EnvironmentConfig contains user-defined unstructured values for + use in a Composition. + + + Read the Crossplane documentation for + [more information about EnvironmentConfigs](https://docs.crossplane.io/latest/concepts/environment-configs). properties: apiVersion: description: |- diff --git a/cluster/crds/apiextensions.crossplane.io_usages.yaml b/cluster/crds/apiextensions.crossplane.io_usages.yaml index e05a9b444..b4a4cc072 100644 --- a/cluster/crds/apiextensions.crossplane.io_usages.yaml +++ b/cluster/crds/apiextensions.crossplane.io_usages.yaml @@ -28,8 +28,16 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: A Usage defines a deletion blocking relationship between two - resources. + description: |- + A Usage defines a deletion blocking relationship between two resources. 
+ + + Usages prevent accidental deletion of a single resource or deletion of + resources with dependent resources. + + + Read the Crossplane documentation for + [more information about Compositions](https://docs.crossplane.io/latest/concepts/usages). properties: apiVersion: description: |- diff --git a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml index 97522ce26..64555df34 100644 --- a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml @@ -41,7 +41,13 @@ spec: name: v1 schema: openAPIV3Schema: - description: A ConfigurationRevision that has been added to Crossplane. + description: |- + A ConfigurationRevision represents a revision of a Configuration. Crossplane + creates new revisions when there are changes to a Configuration. + + + Crossplane creates and manages ConfigurationRevision. Configuration Revisions + aren't designed for user changes. properties: apiVersion: description: |- diff --git a/cluster/crds/pkg.crossplane.io_configurations.yaml b/cluster/crds/pkg.crossplane.io_configurations.yaml index ab2227a47..e70a1ac57 100644 --- a/cluster/crds/pkg.crossplane.io_configurations.yaml +++ b/cluster/crds/pkg.crossplane.io_configurations.yaml @@ -32,8 +32,16 @@ spec: name: v1 schema: openAPIV3Schema: - description: Configuration is the CRD type for a request to add a configuration - to Crossplane. + description: |- + A Configuration installs OCI "Crossplane package" images into Crossplane. + + + The Configuration kind defines the Configuration image and settings + Crossplane uses. + + + Read the Crossplane documentation for + [more information about Configuration packages](https://docs.crossplane.io/latest/concepts/packages). properties: apiVersion: description: |- diff --git a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml index 4f326f738..108969e42 100644 --- a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml @@ -24,9 +24,17 @@ spec: schema: openAPIV3Schema: description: |- - ControllerConfig is the CRD type for a packaged controller configuration. - Deprecated: This API is replaced by DeploymentRuntimeConfig, and is scheduled - to be removed in a future release. See the design doc for more details: + A ControllerConfig applies settings to controllers like Provider pods. + + + _Note:_ Crossplane deprecated this API. Use the + [DeploymentRuntimeConfig](https://docs.crossplane.io/latest/concepts/providers#runtime-configuration) + instead. + + + Read the + [Package Runtime Configuration](https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md) + design document for more details. 
https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md properties: apiVersion: diff --git a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml index 4b8e009cd..7b80a697f 100644 --- a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -23,10 +23,12 @@ spec: schema: openAPIV3Schema: description: |- - A DeploymentRuntimeConfig is used to configure the package runtime when - the package uses a runtime and the package manager is running with - --package-runtime=Deployment (the default). See the following design doc for - more details:https://github.com/crossplane/crossplane/blob/91edeae3fcac96c6c8a1759a723981eea4bb77e4/design/one-pager-package-runtime-config.md#migration-from-controllerconfig + The DeploymentRuntimeConfig provides settings for the Kubernetes Deployment + of a Provider or composition function package. + + + Read the Crossplane documentation for + [more information about DeploymentRuntimeConfigs](https://docs.crossplane.io/latest/concepts/providers/#runtime-configuration). properties: apiVersion: description: |- diff --git a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml index e44ccc751..73de273c4 100644 --- a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -41,7 +41,13 @@ spec: name: v1beta1 schema: openAPIV3Schema: - description: A FunctionRevision that has been added to Crossplane. + description: |- + A FunctionRevision represents a revision of a Function. Crossplane + creates new revisions when there are changes to the Function. + + + Crossplane creates and manages FunctionRevisions. Function Revisions + aren't designed for user changes. properties: apiVersion: description: |- diff --git a/cluster/crds/pkg.crossplane.io_functions.yaml b/cluster/crds/pkg.crossplane.io_functions.yaml index 96c3beaee..919740e6e 100644 --- a/cluster/crds/pkg.crossplane.io_functions.yaml +++ b/cluster/crds/pkg.crossplane.io_functions.yaml @@ -32,8 +32,18 @@ spec: name: v1beta1 schema: openAPIV3Schema: - description: Function is the CRD type for a request to deploy a long-running - Function. + description: |- + A Function kind represents a + [Composition Function](https://docs.crossplane.io/latest/concepts/composition-functions/) + in Crossplane. + + + Composition Functions are custom programs that interact with Crossplane + resources, providers and other components. + + + Read the Crossplane documentation for + [more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions). properties: apiVersion: description: |- diff --git a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml index 284679ed9..ddae0161b 100644 --- a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml @@ -41,7 +41,13 @@ spec: name: v1 schema: openAPIV3Schema: - description: A ProviderRevision that has been added to Crossplane. + description: |- + A ProviderRevision represents a revision of a Provider. Crossplane + creates new revisions when there are changes to a Provider. + + + Crossplane creates and manages ProviderRevisions. Provider Revisions + aren't designed for user changes. 
properties: apiVersion: description: |- diff --git a/cluster/crds/pkg.crossplane.io_providers.yaml b/cluster/crds/pkg.crossplane.io_providers.yaml index 8005b1bd9..385e857b3 100644 --- a/cluster/crds/pkg.crossplane.io_providers.yaml +++ b/cluster/crds/pkg.crossplane.io_providers.yaml @@ -32,7 +32,13 @@ spec: name: v1 schema: openAPIV3Schema: - description: Provider is the CRD type for a request to add a provider to Crossplane. + description: |- + A Provider installs the Provider package, providing the CRDs representing + external resources. + + + Read the Crossplane documentation for + [more information about Providers](https://docs.crossplane.io/latest/concepts/providers). properties: apiVersion: description: |- diff --git a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml index 3ebc1d723..989d5fcb4 100644 --- a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml +++ b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml @@ -29,8 +29,9 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: A StoreConfig configures how Crossplane controllers should store - connection details. + description: |- + A StoreConfig configures how Crossplane controllers should store connection + details in an external secret store. properties: apiVersion: description: |- From 7a44c403afec58b0691ebd435fe91cfee9cbdae8 Mon Sep 17 00:00:00 2001 From: Pete Lumbis Date: Thu, 28 Mar 2024 15:40:16 -0400 Subject: [PATCH 087/370] remove duplicate link Signed-off-by: Pete Lumbis --- apis/pkg/v1alpha1/config.go | 1 - cluster/crds/pkg.crossplane.io_controllerconfigs.yaml | 1 - 2 files changed, 2 deletions(-) diff --git a/apis/pkg/v1alpha1/config.go b/apis/pkg/v1alpha1/config.go index 68c961072..e0b51a2c7 100644 --- a/apis/pkg/v1alpha1/config.go +++ b/apis/pkg/v1alpha1/config.go @@ -184,7 +184,6 @@ type PodObjectMeta struct { // Read the // [Package Runtime Configuration](https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md) // design document for more details. -// https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:resource:scope=Cluster // +kubebuilder:deprecatedversion:warning="ControllerConfig.pkg.crossplane.io/v1alpha1 is deprecated. Use DeploymentRuntimeConfig from pkg.crossplane.io/v1beta1 instead." diff --git a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml index 108969e42..f254581cc 100644 --- a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml @@ -35,7 +35,6 @@ spec: Read the [Package Runtime Configuration](https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md) design document for more details. 
- https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md properties: apiVersion: description: |- From 9415fa281a1b8e3acb499e182bd85ab306738924 Mon Sep 17 00:00:00 2001 From: dalton-hill-0 <111203575+dalton-hill-0@users.noreply.github.com> Date: Thu, 28 Mar 2024 15:30:57 -0500 Subject: [PATCH 088/370] Proposal to Allow Composition Functions to Set Claim Conditions (#5426) Add design doc for communicating between composition functions and the claim Signed-off-by: dalton hill --- design/one-pager-fn-claim-conditions.md | 272 ++++++++++++++++++++++++ 1 file changed, 272 insertions(+) create mode 100644 design/one-pager-fn-claim-conditions.md diff --git a/design/one-pager-fn-claim-conditions.md b/design/one-pager-fn-claim-conditions.md new file mode 100644 index 000000000..e6a5c5644 --- /dev/null +++ b/design/one-pager-fn-claim-conditions.md @@ -0,0 +1,272 @@ +# Communication Between Composition Functions and the Claim + +* Owner: Dalton Hill (@dalton-hill-0) +* Reviewers: Nic Cope (@negz) +* Status: Draft + +## Background + +### Desired Behavior +Composition Function authors should be able to communicate and translate +the underlying status with users. + +#### Managed Resource Status +We think authors often won't want to surface the status as it appear on an MR, +but will probably want to derive more user-friendly messages from it. Messages +that are more meaningful to folks reading claims. + +Some examples include: +- The external system for an MR is unreachable. +- The MR is incorrectly configured. +- The MR is being created, updated, etc. + +#### Internal Errors +We think authors may want to have a catch-all Internal Error +message. Authors should be able to display the real error on the XR and provide +a basic "Internal Error" message on the Claim. + +Currently internal errors often leave the Claim in a "Waiting" state. It would +be nice to notify the user that an internal error was encountered, and that the +team has been notified by an alert. + +### Existing Behavior + +#### Function Results +Currently functions can return Results. Depending on the type of results seen, +you can observe the following behavior on the Composite Resource. + +Fatal Result: +- Synced status condition is set to False, contains result's message. +- Warning Event generated (reason: ReconcileError), containing result's message. + +Warning Result: +- Warning Event (reason: ComposeResources) generated, containing result's + message. + +Normal Result: +- Normal Event (reason: ComposeResources) generated, containing result's + message. + + +#### Setting the Claim's Status +Currently the only path to communicate a custom message with the user is by +defining your own field in the Claim's status. +For example, we can define an XRD with: +```yaml +status: + someCommunicationField: + - msg: "Something went wrong." +``` + +There are a couple issues with this solution. +- If we need to halt resource reconciliation due to a fatal error, we can do so + with the [SDK](https://github.com/crossplane/function-sdk-go)'s + `response.Fatal`, however, this does not also allow us to update the XR and + Claim for communication with the user. +- There is an existing field that would be more intuitive to use as it is + already performing this same task for Crossplane itself (`status.conditions`). + +#### Setting the Composite's Status Conditions +Currently you can update the Composite's status conditions by setting them with +SetDesiredCompositeResource. 
+There are a couple of limitations to this: +- it only shows up on the XR +- it only shows up if there are no fatal results + +Example of setting the Composite's status conditions. +```go +// includes: +// corev1 "k8s.io/api/core/v1" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +// +// "github.com/crossplane/function-sdk-go/response" +desiredXR, err := request.GetDesiredCompositeResource(req) +c := xpv1.Condition{ + Type: xpv1.ConditionType("ImageReady"), + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.Now(), + Reason: "NotFound", + Message: "The image provided does not exist or you are not "+ + "authorized to use it.", +} +desiredXR.Resource.SetConditions(c) +response.SetDesiredCompositeResource(rsp, desiredXR) +``` + +## Proposal +We would like to allow the Composition Function author to: +- Choose where results go (Claim or XR) +- Allow results to update the Status Conditions of the XR and Claim + +The following sections get into the details of each of the above items. + +### Choose Where Results Go +Currently each result returned by a function will create a corresponding +event on the XR (if no previous fatal result exists). + +We can expand this functionality by allowing the Result to have targets. In +order to accomplish this, we will need to expand the Result API as follows. +```protobuf +message Result { + // Omitted for brevity + + Target target = 3; +} + +// Target of Function results. +enum Target { + TARGET_UNSPECIFIED = 0; + TARGET_COMPOSITE_ONLY = 1; + TARGET_COMPOSITE_AND_CLAIM = 2; +} +``` +The reason for having `TARGET_COMPOSITE_AND_CLAIM` and not `TARGET_CLAIM` is an +implementation limitation. This prevents more involved API changes, and this +is also consistent with existing behavior (func copies to XR, Crossplane copies +XR to Claim). + +The following is an example of how a function author could use this behavior. +Note that this is just a sketch and may not be the final API. +```go +// import "github.com/crossplane/function-sdk-go/response" +response.Fatal(rsp, errors.New("The image provided does not exist or you are not authorized to use it.")). + ConditionFalse("ImageReady", "NotFound"). + TargetCompositeAndClaim() +``` + +To support this behavior, the status of the Composite would need an additional +field `claimConditions`. This field will contain the types of conditions that +should be propagated to the Claim. +```yaml +# composite status +status: + # The XR's condition types that should be back-propagated to the claim + claimConditions: [DatabaseReady, ImageReady] + # The XR's conditions + conditions: + - type: DatabaseReady + status: True + reason: Available + - type: ImageReady + status: False + reason: NotFound + message: The image provided does not exist or you are not authorized to use it. +``` + +### Allow Results to Set a Condition +We would like the function author to be able to set the Claim's status +conditions. This would allow the function author to clearly communicate the +state of the Claim with their users. + +To allow the setting of conditions in the result, we will need to expand the +Result API as follows. +```protobuf +message Result { + // Omitted for brevity + + // Optionally update the supplied status condition on all targets. + // The result's reason and message will be used in the condition. + optional Condition condition = 4; +} + +message Condition { + // Type of the condition, e.g. DatabaseReady. 
+ // 'Ready' and 'Synced' are reserved for use by Crossplane. + string type = 1; + + // Status of the condition. + Status status = 2; + + // Machine-readable PascalCase reason. + string reason = 3; +} +``` + +An example of a function utilizing this new ability: +```go +// rb "github.com/crossplane/function-sdk-go/response/result/builder" +// const databaseReady = "DatabaseReady" +// const reasonUnauthorized = "Unauthorized" +// var messageUnauthorized = errors.New("You are unauthorized to access this resource.") +result := rb.Fatal(messageUnauthorized). + TargetCompositeAndClaim(). + WithConditionFalse(databaseReady, reasonUnauthorized). + Build() +response.AddResult(rsp, result) +``` + +## Advanced Usage Example +Lets say we are a team of platform engineers who have a Crossplane offering. +For each Claim, we wish to expose a set of conditions that users can expect to +exist which provide: +- the current status of the underlying resources +- any steps required by the user to remediate an issue + +Lets say we have a claim that does the following.. +1. Accepts an identifier to an existing database +1. Accepts an image to deploy +1. Configures a deployment that uses the image provided and is authenticated to +the database. + +### Scenarios +Given a few different scenarios, users could expect to see the following +`status.conditions` for the claim. + +#### Image Not Found +First we found the database and determined that the user has authorization, +however, the image they provided was not found. + +An example of the Claim's status: +```yaml +status: + conditions: + - type: DatabaseReady + status: True + reason: Available + - type: ImageReady + status: False + reason: NotFound + message: The image provided does not exist or you are not authorized to use + it. +``` +#### Progressing +All is fine and the application is progressing but not yet fully online. + +An example of the Claim's status: +```yaml +status: + conditions: + - type: DatabaseReady + status: True + reason: Available + - type: ImageReady + status: True + reason: Available + - type: AppReady + status: False + reason: Creating + message: Waiting for the deployment to be available. +``` + +#### Success +Once everything is online and running smoothly, users should see something like +this. + +An example of the Claim's status: +```yaml +status: + conditions: + - type: DatabaseReady + status: True + reason: Available + - type: ImageReady + status: True + reason: Available + - type: AppReady + status: True + reason: Available +``` + +## Further Reading +- [k8s typical status properties](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties) From e2e4382ae241baa56f3f4320c31f580bcbdf20ef Mon Sep 17 00:00:00 2001 From: Abhijit Hendre Date: Fri, 29 Mar 2024 17:50:47 +0530 Subject: [PATCH 089/370] Adding alphasense to crossplane adopters list Signed-off-by: Abhijit Hendre --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index c0cc5876e..31e8b0bf3 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -81,3 +81,4 @@ This list is sorted in the order that organizations were added to it. 
| [Variphy](https://www.variphy.com/) | [info@variphy.com](mailto:info@variphy.com) ([@dmvariphy](https://github.com/dmvariphy) [@nick-variphy](https://github.com/nick-variphy) [@dfalter-variphy](https://github.com/dfalter-variphy) [@zach-variphy](https://github.com/zach-variphy)) | We use Crossplane (via [Upbound Cloud](https://www.upbound.io/)) to manage our development and production infrastructure via GitOps. Crossplane also allows us to provide custom APIs for production Variphy applications to dynamically manage external resources, such as [Confluent Cloud](https://www.confluent.io/) Kafka topics. | | [OneUptime](https://oneuptime.com) | @simlarsen | Builds production and developer environments that power the OneUptime Platform. | | [Xata](https://xata.io) | [@mattfield](https://github.com/mattfield) [@paulaguijarro](https://github.com/paulaguijarro) | Crossplane manages the dev, staging, and production RDS Aurora PostgreSQL clusters for our [Dedicated Clusters](https://xata.io/blog/postgres-dedicated-clusters) offering, along with Flux Kustomizations and other resources that provision cells of internal [Xata](https://xata.io) services. | +| [AlphaSense](https://www.alpha-sense.com/) | @abhihendre | Engineering teams at [AlphaSense](https://www.alpha-sense.com/) leverage Crossplane APIs, abstracted by a set of Helm charts and Compositions curated by our Platform Teams, to seamlessly provision cloud services across three major clouds, including our production environment.| \ No newline at end of file From 6365d26beb3a68b2e4058ec97626339da7e11346 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Fri, 29 Mar 2024 15:39:13 +0300 Subject: [PATCH 090/370] Add missing scrape annotations to provider pods Signed-off-by: Hasan Turken --- .../pkg/revision/runtime_override_options.go | 15 +++++++ .../pkg/revision/runtime_provider.go | 5 +++ .../controller/pkg/revision/runtime_test.go | 43 +++++++++++++++++++ 3 files changed, 63 insertions(+) diff --git a/internal/controller/pkg/revision/runtime_override_options.go b/internal/controller/pkg/revision/runtime_override_options.go index 3c3212a1c..f40cfd7b2 100644 --- a/internal/controller/pkg/revision/runtime_override_options.go +++ b/internal/controller/pkg/revision/runtime_override_options.go @@ -92,6 +92,21 @@ func DeploymentWithNamespace(namespace string) DeploymentOverride { } } +// DeploymentWithOptionalPodScrapeAnnotations adds Prometheus scrape annotations +// to a Deployment pod template if they are not already set. +func DeploymentWithOptionalPodScrapeAnnotations() DeploymentOverride { + return func(d *appsv1.Deployment) { + if d.Spec.Template.Annotations == nil { + d.Spec.Template.Annotations = map[string]string{} + } + if _, ok := d.Spec.Template.Annotations["prometheus.io/scrape"]; !ok { + d.Spec.Template.Annotations["prometheus.io/scrape"] = "true" + d.Spec.Template.Annotations["prometheus.io/port"] = "8080" + d.Spec.Template.Annotations["prometheus.io/path"] = "/metrics" + } + } +} + // DeploymentWithOwnerReferences overrides the owner references of a Deployment. 
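The new `DeploymentWithOptionalPodScrapeAnnotations` override above only fills in annotations that are missing. A minimal sketch (assuming the override and the usual `appsv1` import are in scope) of how an explicit opt-out supplied through a DeploymentRuntimeConfig survives it:

```go
// Sketch only: the override adds the scrape/port/path annotations solely when
// "prometheus.io/scrape" is not already set on the pod template.
d := &appsv1.Deployment{}
d.Spec.Template.Annotations = map[string]string{"prometheus.io/scrape": "false"}

DeploymentWithOptionalPodScrapeAnnotations()(d)

// The explicit "false" is preserved, and prometheus.io/port and
// prometheus.io/path are not added.
```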
func DeploymentWithOwnerReferences(owners []metav1.OwnerReference) DeploymentOverride { return func(d *appsv1.Deployment) { diff --git a/internal/controller/pkg/revision/runtime_provider.go b/internal/controller/pkg/revision/runtime_provider.go index 023d03819..0be6d63c1 100644 --- a/internal/controller/pkg/revision/runtime_provider.go +++ b/internal/controller/pkg/revision/runtime_provider.go @@ -229,6 +229,11 @@ func providerDeploymentOverrides(pm *pkgmetav1.Provider, pr v1.PackageRevisionWi // and plan to remove this after implementing a migration in a future // release. DeploymentWithSelectors(providerSelectors(pm, pr)), + + // Add optional scrape annotations to the deployment. It is possible to + // disable the scraping by setting the annotation "prometheus.io/scrape" + // as "false" in the DeploymentRuntimeConfig. + DeploymentWithOptionalPodScrapeAnnotations(), } do = append(do, DeploymentRuntimeWithOptionalImage(image)) diff --git a/internal/controller/pkg/revision/runtime_test.go b/internal/controller/pkg/revision/runtime_test.go index ed73fae81..a568215f7 100644 --- a/internal/controller/pkg/revision/runtime_test.go +++ b/internal/controller/pkg/revision/runtime_test.go @@ -159,6 +159,7 @@ func TestRuntimeManifestBuilderDeployment(t *testing.T) { "pkg.crossplane.io/revision": providerRevisionName, }), func(deployment *appsv1.Deployment) { deployment.Spec.Replicas = ptr.To[int32](3) + deployment.Spec.Template.Annotations = nil deployment.Spec.Template.Labels["k"] = "v" deployment.Spec.Template.Spec.Containers[0].Image = "crossplane/provider-foo:v1.2.4" deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, corev1.Volume{Name: "vol-a"}, corev1.Volume{Name: "vol-b"}) @@ -221,6 +222,43 @@ func TestRuntimeManifestBuilderDeployment(t *testing.T) { }), }, }, + "ProviderDeploymentNoScrapeAnnotation": { + reason: "It should be possible to disable default scrape annotations", + args: args{ + builder: &RuntimeManifestBuilder{ + revision: providerRevision, + namespace: namespace, + runtimeConfig: &v1beta1.DeploymentRuntimeConfig{ + Spec: v1beta1.DeploymentRuntimeConfigSpec{ + DeploymentTemplate: &v1beta1.DeploymentTemplate{ + Spec: &appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "prometheus.io/scrape": "false", + }, + }, + Spec: corev1.PodSpec{}, + }, + }, + }, + }, + }, + }, + serviceAccountName: providerRevisionName, + overrides: providerDeploymentOverrides(&pkgmetav1.Provider{ObjectMeta: metav1.ObjectMeta{Name: providerMetaName}}, providerRevision, providerImage), + }, + want: want{ + want: deploymentProvider(providerName, providerRevisionName, providerImage, DeploymentWithSelectors(map[string]string{ + "pkg.crossplane.io/provider": providerMetaName, + "pkg.crossplane.io/revision": providerRevisionName, + }), func(deployment *appsv1.Deployment) { + deployment.Spec.Template.Annotations = map[string]string{ + "prometheus.io/scrape": "false", + } + }), + }, + }, "ProviderDeploymentWithAdvancedRuntimeConfig": { reason: "Baseline provided by the runtime config should be applied to the deployment for advanced use cases", args: args{ @@ -391,6 +429,11 @@ func deploymentProvider(provider string, revision string, image string, override }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "prometheus.io/scrape": "true", + "prometheus.io/port": "8080", + "prometheus.io/path": "/metrics", + }, Labels: map[string]string{ 
"pkg.crossplane.io/revision": revision, "pkg.crossplane.io/provider": provider, From 271c25ff517bdf528a1dcc7ef5b4864163f466b6 Mon Sep 17 00:00:00 2001 From: Pete Lumbis Date: Fri, 29 Mar 2024 16:48:03 -0400 Subject: [PATCH 091/370] updates based on Nic's feedback Signed-off-by: Pete Lumbis --- apis/apiextensions/v1/composition_revision_types.go | 4 ++-- apis/apiextensions/v1/composition_types.go | 4 ++-- .../zz_generated.composition_revision_types.go | 4 ++-- apis/pkg/v1/configuration_types.go | 11 +++++------ apis/pkg/v1/provider_types.go | 8 ++++---- apis/pkg/v1alpha1/config.go | 3 +-- apis/pkg/v1beta1/function_types.go | 12 ++++-------- ...xtensions.crossplane.io_compositionrevisions.yaml | 8 ++++---- .../apiextensions.crossplane.io_compositions.yaml | 4 ++-- .../pkg.crossplane.io_configurationrevisions.yaml | 4 ++-- cluster/crds/pkg.crossplane.io_configurations.yaml | 8 +++----- .../crds/pkg.crossplane.io_controllerconfigs.yaml | 4 +--- .../crds/pkg.crossplane.io_functionrevisions.yaml | 4 ++-- cluster/crds/pkg.crossplane.io_functions.yaml | 9 ++------- .../crds/pkg.crossplane.io_providerrevisions.yaml | 4 ++-- cluster/crds/pkg.crossplane.io_providers.yaml | 4 ++-- 16 files changed, 40 insertions(+), 55 deletions(-) diff --git a/apis/apiextensions/v1/composition_revision_types.go b/apis/apiextensions/v1/composition_revision_types.go index f62eb2825..7b098762d 100644 --- a/apis/apiextensions/v1/composition_revision_types.go +++ b/apis/apiextensions/v1/composition_revision_types.go @@ -136,8 +136,8 @@ type CompositionRevisionStatus struct { // A CompositionRevision represents a revision of a Composition. Crossplane // creates new revisions when there are changes to the Composition. // -// Crossplane creates and manages CompositionRevisions. Composition Revisions -// aren't designed for user changes. +// Crossplane creates and manages CompositionRevisions. Don't directly edit +// CompositionRevisions. // +kubebuilder:printcolumn:name="REVISION",type="string",JSONPath=".spec.revision" // +kubebuilder:printcolumn:name="XR-KIND",type="string",JSONPath=".spec.compositeTypeRef.kind" // +kubebuilder:printcolumn:name="XR-APIVERSION",type="string",JSONPath=".spec.compositeTypeRef.apiVersion" diff --git a/apis/apiextensions/v1/composition_types.go b/apis/apiextensions/v1/composition_types.go index 2d77cccb6..bbc0a66ae 100644 --- a/apis/apiextensions/v1/composition_types.go +++ b/apis/apiextensions/v1/composition_types.go @@ -109,8 +109,8 @@ type CompositionSpec struct { // +genclient // +genclient:nonNamespaced -// A Composition defines a template for creating multiple managed resources into -// a single Kubernetes object. +// A Composition defines a collection of managed resources or functions that +// Crossplane uses to create and manage new composite resources. // // Read the Crossplane documentation for // [more information about Compositions](https://docs.crossplane.io/latest/concepts/compositions). diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go index 000792f80..84c24172f 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go @@ -137,8 +137,8 @@ type CompositionRevisionStatus struct { // A CompositionRevision represents a revision of a Composition. Crossplane // creates new revisions when there are changes to the Composition. // -// Crossplane creates and manages CompositionRevisions. 
Composition Revisions -// aren't designed for user changes. +// Crossplane creates and manages CompositionRevisions. Don't directly edit +// CompositionRevisions. // +kubebuilder:printcolumn:name="REVISION",type="string",JSONPath=".spec.revision" // +kubebuilder:printcolumn:name="XR-KIND",type="string",JSONPath=".spec.compositeTypeRef.kind" // +kubebuilder:printcolumn:name="XR-APIVERSION",type="string",JSONPath=".spec.compositeTypeRef.apiVersion" diff --git a/apis/pkg/v1/configuration_types.go b/apis/pkg/v1/configuration_types.go index f216fa13b..720680fc7 100644 --- a/apis/pkg/v1/configuration_types.go +++ b/apis/pkg/v1/configuration_types.go @@ -26,10 +26,9 @@ import ( // +genclient // +genclient:nonNamespaced -// A Configuration installs OCI "Crossplane package" images into Crossplane. -// -// The Configuration kind defines the Configuration image and settings -// Crossplane uses. +// A Configuration installs an OCI compatible Crossplane package, extending +// Crossplane with support for new kinds of CompositeResourceDefinitions and +// Compositions. // // Read the Crossplane documentation for // [more information about Configuration packages](https://docs.crossplane.io/latest/concepts/packages). @@ -76,8 +75,8 @@ type ConfigurationList struct { // A ConfigurationRevision represents a revision of a Configuration. Crossplane // creates new revisions when there are changes to a Configuration. // -// Crossplane creates and manages ConfigurationRevision. Configuration Revisions -// aren't designed for user changes. +// Crossplane creates and manages ConfigurationRevision. Don't directly edit +// ConfigurationRevisions. // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" diff --git a/apis/pkg/v1/provider_types.go b/apis/pkg/v1/provider_types.go index 41b892503..48dbf699a 100644 --- a/apis/pkg/v1/provider_types.go +++ b/apis/pkg/v1/provider_types.go @@ -26,8 +26,8 @@ import ( // +genclient // +genclient:nonNamespaced -// A Provider installs the Provider package, providing the CRDs representing -// external resources. +// A Provider installs an OCI compatible Crossplane package, extending +// Crossplane with support for new kinds of managed resources. // // Read the Crossplane documentation for // [more information about Providers](https://docs.crossplane.io/latest/concepts/providers). @@ -81,8 +81,8 @@ type ProviderRevisionSpec struct { // A ProviderRevision represents a revision of a Provider. Crossplane // creates new revisions when there are changes to a Provider. // -// Crossplane creates and manages ProviderRevisions. Provider Revisions -// aren't designed for user changes. +// Crossplane creates and manages ProviderRevisions. Don't directly edit +// ProviderRevisions. // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" diff --git a/apis/pkg/v1alpha1/config.go b/apis/pkg/v1alpha1/config.go index e0b51a2c7..2bab1086e 100644 --- a/apis/pkg/v1alpha1/config.go +++ b/apis/pkg/v1alpha1/config.go @@ -176,8 +176,7 @@ type PodObjectMeta struct { // +genclient:nonNamespaced // A ControllerConfig applies settings to controllers like Provider pods. -// -// _Note:_ Crossplane deprecated this API. Use the +// Deprecated: Use the // [DeploymentRuntimeConfig](https://docs.crossplane.io/latest/concepts/providers#runtime-configuration) // instead. 
// diff --git a/apis/pkg/v1beta1/function_types.go b/apis/pkg/v1beta1/function_types.go index c38be476f..f6d3bc55d 100644 --- a/apis/pkg/v1beta1/function_types.go +++ b/apis/pkg/v1beta1/function_types.go @@ -34,12 +34,8 @@ import ( // +genclient // +genclient:nonNamespaced -// A Function kind represents a -// [Composition Function](https://docs.crossplane.io/latest/concepts/composition-functions/) -// in Crossplane. -// -// Composition Functions are custom programs that interact with Crossplane -// resources, providers and other components. +// A Function installs an OCI compatible Crossplane package, extending +// Crossplane with support for a new kind of composition function. // // Read the Crossplane documentation for // [more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions). @@ -93,8 +89,8 @@ type FunctionRevisionSpec struct { // A FunctionRevision represents a revision of a Function. Crossplane // creates new revisions when there are changes to the Function. // -// Crossplane creates and manages FunctionRevisions. Function Revisions -// aren't designed for user changes. +// Crossplane creates and manages FunctionRevisions. Don't directly edit +// FunctionRevisions. // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index d13307d8c..26115fe61 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -38,8 +38,8 @@ spec: creates new revisions when there are changes to the Composition. - Crossplane creates and manages CompositionRevisions. Composition Revisions - aren't designed for user changes. + Crossplane creates and manages CompositionRevisions. Don't directly edit + CompositionRevisions. properties: apiVersion: description: |- @@ -1618,8 +1618,8 @@ spec: creates new revisions when there are changes to the Composition. - Crossplane creates and manages CompositionRevisions. Composition Revisions - aren't designed for user changes. + Crossplane creates and manages CompositionRevisions. Don't directly edit + CompositionRevisions. properties: apiVersion: description: |- diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index 2be9b24ba..49a7eeb21 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -31,8 +31,8 @@ spec: schema: openAPIV3Schema: description: |- - A Composition defines a template for creating multiple managed resources into - a single Kubernetes object. + A Composition defines a collection of managed resources or functions that + Crossplane uses to create and manage new composite resources. Read the Crossplane documentation for diff --git a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml index 64555df34..ed34a2b54 100644 --- a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml @@ -46,8 +46,8 @@ spec: creates new revisions when there are changes to a Configuration. - Crossplane creates and manages ConfigurationRevision. 
Configuration Revisions - aren't designed for user changes. + Crossplane creates and manages ConfigurationRevision. Don't directly edit + ConfigurationRevisions. properties: apiVersion: description: |- diff --git a/cluster/crds/pkg.crossplane.io_configurations.yaml b/cluster/crds/pkg.crossplane.io_configurations.yaml index e70a1ac57..6bc35a71e 100644 --- a/cluster/crds/pkg.crossplane.io_configurations.yaml +++ b/cluster/crds/pkg.crossplane.io_configurations.yaml @@ -33,11 +33,9 @@ spec: schema: openAPIV3Schema: description: |- - A Configuration installs OCI "Crossplane package" images into Crossplane. - - - The Configuration kind defines the Configuration image and settings - Crossplane uses. + A Configuration installs an OCI compatible Crossplane package, extending + Crossplane with support for new kinds of CompositeResourceDefinitions and + Compositions. Read the Crossplane documentation for diff --git a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml index f254581cc..3cee9ece8 100644 --- a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml @@ -25,9 +25,7 @@ spec: openAPIV3Schema: description: |- A ControllerConfig applies settings to controllers like Provider pods. - - - _Note:_ Crossplane deprecated this API. Use the + Deprecated: Use the [DeploymentRuntimeConfig](https://docs.crossplane.io/latest/concepts/providers#runtime-configuration) instead. diff --git a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml index 73de273c4..4695ed512 100644 --- a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -46,8 +46,8 @@ spec: creates new revisions when there are changes to the Function. - Crossplane creates and manages FunctionRevisions. Function Revisions - aren't designed for user changes. + Crossplane creates and manages FunctionRevisions. Don't directly edit + FunctionRevisions. properties: apiVersion: description: |- diff --git a/cluster/crds/pkg.crossplane.io_functions.yaml b/cluster/crds/pkg.crossplane.io_functions.yaml index 919740e6e..cc895736c 100644 --- a/cluster/crds/pkg.crossplane.io_functions.yaml +++ b/cluster/crds/pkg.crossplane.io_functions.yaml @@ -33,13 +33,8 @@ spec: schema: openAPIV3Schema: description: |- - A Function kind represents a - [Composition Function](https://docs.crossplane.io/latest/concepts/composition-functions/) - in Crossplane. - - - Composition Functions are custom programs that interact with Crossplane - resources, providers and other components. + A Function installs an OCI compatible Crossplane package, extending + Crossplane with support for a new kind of composition function. Read the Crossplane documentation for diff --git a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml index ddae0161b..92ff14575 100644 --- a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml @@ -46,8 +46,8 @@ spec: creates new revisions when there are changes to a Provider. - Crossplane creates and manages ProviderRevisions. Provider Revisions - aren't designed for user changes. + Crossplane creates and manages ProviderRevisions. Don't directly edit + ProviderRevisions. 
properties: apiVersion: description: |- diff --git a/cluster/crds/pkg.crossplane.io_providers.yaml b/cluster/crds/pkg.crossplane.io_providers.yaml index 385e857b3..d18030821 100644 --- a/cluster/crds/pkg.crossplane.io_providers.yaml +++ b/cluster/crds/pkg.crossplane.io_providers.yaml @@ -33,8 +33,8 @@ spec: schema: openAPIV3Schema: description: |- - A Provider installs the Provider package, providing the CRDs representing - external resources. + A Provider installs an OCI compatible Crossplane package, extending + Crossplane with support for new kinds of managed resources. Read the Crossplane documentation for From c9088a58cafd5bdfa7fae6b19ca6c75f3fdcbbb9 Mon Sep 17 00:00:00 2001 From: Sunil Shivanand Date: Thu, 4 Apr 2024 00:15:31 +0200 Subject: [PATCH 092/370] Add --context option to crossplane cli beta trace command Allows the user to run trace command without switching Kubernetes context. Signed-off-by: Sunil Shivanand --- cmd/crank/beta/trace/trace.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/crank/beta/trace/trace.go b/cmd/crank/beta/trace/trace.go index b113aeb41..35fc77072 100644 --- a/cmd/crank/beta/trace/trace.go +++ b/cmd/crank/beta/trace/trace.go @@ -28,8 +28,8 @@ import ( "k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/restmapper" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" @@ -62,6 +62,7 @@ type Cmd struct { // TODO(phisco): add support for all the usual kubectl flags; configFlags := genericclioptions.NewConfigFlags(true).AddFlags(...) // TODO(phisco): move to namespace defaulting to "" and use the current context's namespace + Context string `default:"" help:"Kubernetes context." name:"context" short:"c"` Namespace string `default:"default" help:"Namespace of the resource." name:"namespace" short:"n"` Output string `default:"default" enum:"default,wide,json,dot" help:"Output format. One of: default, wide, json, dot." name:"output" short:"o"` ShowConnectionSecrets bool `help:"Show connection secrets in the output." 
name:"show-connection-secrets" short:"s"` @@ -113,7 +114,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { } logger.Debug("Built printer", "output", c.Output) - kubeconfig, err := ctrl.GetConfig() + kubeconfig, err := config.GetConfigWithContext(c.Context) if err != nil { return errors.Wrap(err, errKubeConfig) } From 9835ea41fbb86065231db4107c2d15e3a64acb66 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 07:18:41 +0000 Subject: [PATCH 093/370] chore(deps): update module golang.org/x/net to v0.23.0 [security] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 23add0508..3ca664fff 100644 --- a/go.mod +++ b/go.mod @@ -197,9 +197,9 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.18.0 // indirect + golang.org/x/crypto v0.21.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.20.0 // indirect // indirect + golang.org/x/net v0.23.0 golang.org/x/oauth2 v0.15.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 diff --git a/go.sum b/go.sum index e7751a5d1..0cbaff651 100644 --- a/go.sum +++ b/go.sum @@ -511,8 +511,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= @@ -543,8 +543,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= From d7d778ca77d8ababbcc01caf98acda664e033991 Mon Sep 17 00:00:00 2001 From: Christopher Haar Date: Thu, 4 Apr 2024 10:26:49 +0200 Subject: [PATCH 094/370] feat(version): version subcommand with server version Signed-off-by: Christopher Haar --- cmd/crank/main.go | 23 ++-------- 
cmd/crank/version/fetch.go | 87 ++++++++++++++++++++++++++++++++++++ cmd/crank/version/version.go | 59 ++++++++++++++++++++++++ 3 files changed, 149 insertions(+), 20 deletions(-) create mode 100644 cmd/crank/version/fetch.go create mode 100644 cmd/crank/version/version.go diff --git a/cmd/crank/main.go b/cmd/crank/main.go index f5c13e55f..f6c52dd43 100644 --- a/cmd/crank/main.go +++ b/cmd/crank/main.go @@ -18,39 +18,22 @@ limitations under the License. package main import ( - "fmt" - "github.com/alecthomas/kong" "sigs.k8s.io/controller-runtime/pkg/log/zap" "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane/cmd/crank/beta" + "github.com/crossplane/crossplane/cmd/crank/version" "github.com/crossplane/crossplane/cmd/crank/xpkg" - "github.com/crossplane/crossplane/internal/version" ) var _ = kong.Must(&cli{}) type ( - versionFlag string verboseFlag bool ) -// Decode overrides the default string decoder to be a no-op. -func (v versionFlag) Decode(_ *kong.DecodeContext) error { return nil } - -// IsBool indicates that this string flag should be treated as a boolean value. -func (v versionFlag) IsBool() bool { return true } - -// BeforeApply indicates that we want to execute the logic before running any -// commands. -func (v versionFlag) BeforeApply(app *kong.Kong) error { //nolint:unparam // BeforeApply requires this signature. - fmt.Fprintln(app.Stdout, version.New().GetVersionString()) - app.Exit(0) - return nil -} - func (v verboseFlag) BeforeApply(ctx *kong.Context) error { //nolint:unparam // BeforeApply requires this signature. logger := logging.NewLogrLogger(zap.New(zap.UseDevMode(true))) ctx.BindTo(logger, (*logging.Logger)(nil)) @@ -67,11 +50,11 @@ type cli struct { // The alpha and beta subcommands are intentionally in a separate block. We // want them to appear after all other subcommands. - Beta beta.Cmd `cmd:"" help:"Beta commands."` + Beta beta.Cmd `cmd:"" help:"Beta commands."` + Version version.Cmd `cmd:"" help:"Print the client and server version information for the current context."` // Flags. Verbose verboseFlag `help:"Print verbose logging statements." name:"verbose"` - Version versionFlag `help:"Print version and quit." name:"version" short:"v"` } func main() { diff --git a/cmd/crank/version/fetch.go b/cmd/crank/version/fetch.go new file mode 100644 index 000000000..6e246be6d --- /dev/null +++ b/cmd/crank/version/fetch.go @@ -0,0 +1,87 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package version contains common functions to get versions +package version + +import ( + "context" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/crossplane/crossplane-runtime/pkg/errors" +) + +const ( + errKubeConfig = "failed to get kubeconfig" + errCreateK8sClientset = "could not create the clientset for Kubernetes" + errFetchCrossplaneDeployment = "could not fetch deployments" +) + +// FetchCrossplaneVersion initializes a Kubernetes client and fetches +// and returns the version of the Crossplane deployment. If the version +// does not have a leading 'v', it prepends it. +func FetchCrossplaneVersion(ctx context.Context) (string, error) { + var version string + config, err := ctrl.GetConfig() + if err != nil { + return "", errors.Wrap(err, errKubeConfig) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return "", errors.Wrap(err, errCreateK8sClientset) + } + + deployments, err := clientset.AppsV1().Deployments("").List(ctx, metav1.ListOptions{ + LabelSelector: "app=crossplane", + }) + if err != nil { + return "", errors.Wrap(err, errFetchCrossplaneDeployment) + } + + for _, deployment := range deployments.Items { + v, ok := deployment.Labels["app.kubernetes.io/version"] + if ok { + if !strings.HasPrefix(v, "v") { + version = "v" + v + } + return version, nil + } + + if len(deployment.Spec.Template.Spec.Containers) > 0 { + imageRef := deployment.Spec.Template.Spec.Containers[0].Image + ref, err := name.ParseReference(imageRef) + if err != nil { + return "", errors.Wrap(err, "error parsing image reference") + } + + if tagged, ok := ref.(name.Tag); ok { + imageTag := tagged.TagStr() + if !strings.HasPrefix(imageTag, "v") { + imageTag = "v" + imageTag + } + return imageTag, nil + } + } + } + + return "", errors.New("Crossplane version or image tag not found") +} diff --git a/cmd/crank/version/version.go b/cmd/crank/version/version.go new file mode 100644 index 000000000..7ce7e506b --- /dev/null +++ b/cmd/crank/version/version.go @@ -0,0 +1,59 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version contains version cmd +package version + +import ( + "context" + "fmt" + "time" + + "github.com/alecthomas/kong" + "github.com/pkg/errors" + + "github.com/crossplane/crossplane/internal/version" +) + +const ( + errGetCrossplaneVersion = "unable to get crossplane version" +) + +// Cmd represents the version command. +type Cmd struct { + Client bool `env:"" help:"If true, shows client version only (no server required)."` +} + +// Run runs the version command. 
+func (c *Cmd) Run(k *kong.Context) error { + fmt.Fprintln(k.Stdout, "Client Version: "+version.New().GetVersionString()) + if c.Client { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + vxp, err := FetchCrossplaneVersion(ctx) + if err != nil { + return errors.Wrap(err, errGetCrossplaneVersion) + } + if vxp != "" { + fmt.Fprintln(k.Stdout, "Server Version: "+vxp) + } + + return nil +} From 7fa6eb76675fa273b61ae8f296cc92026e77e27c Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Thu, 4 Apr 2024 11:17:31 +0200 Subject: [PATCH 095/370] Update roadmap links to prioritized list view Signed-off-by: Jared Watts --- README.md | 4 ++-- ROADMAP.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 788729913..6454e3e8d 100644 --- a/README.md +++ b/README.md @@ -114,13 +114,13 @@ Crossplane is under the Apache 2.0 license. [community meeting time]: https://www.thetimezoneconverter.com/?t=10:00&tz=PT%20%28Pacific%20Time%29 [Current agenda and past meeting notes]: https://docs.google.com/document/d/1q_sp2jLQsDEOX7Yug6TPOv7Fwrys6EwcF5Itxjkno7Y/edit?usp=sharing [Past meeting recordings]: https://www.youtube.com/playlist?list=PL510POnNVaaYYYDSICFSNWFqNbx1EMr-M -[roadmap and releases board]: https://github.com/orgs/crossplane/projects/20/views/3?pane=info +[roadmap and releases board]: https://github.com/orgs/crossplane/projects/20/views/9?pane=info [cncf]: https://www.cncf.io/ [Get Started Docs]: https://docs.crossplane.io/latest/getting-started/ [community calendar]: https://calendar.google.com/calendar/embed?src=c_2cdn0hs9e2m05rrv1233cjoj1k%40group.calendar.google.com [releases]: https://github.com/crossplane/crossplane/releases [ADOPTERS.md]: ADOPTERS.md -[Crossplane Roadmap]: https://github.com/orgs/crossplane/projects/20/views/3?pane=info +[Crossplane Roadmap]: https://github.com/orgs/crossplane/projects/20/views/9?pane=info [sig-composition-environments-slack]: https://crossplane.slack.com/archives/C05BP6QFLUW [sig-composition-functions-slack]: https://crossplane.slack.com/archives/C031Y29CSAE [sig-deletion-ordering-slack]: https://crossplane.slack.com/archives/C05BP8W5ALW diff --git a/ROADMAP.md b/ROADMAP.md index 2b539b4b9..970bd371d 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -11,4 +11,4 @@ delivery timeline. 
[Crossplane Roadmap] -[Crossplane Roadmap]: https://github.com/orgs/crossplane/projects/20/views/3?pane=info +[Crossplane Roadmap]: https://github.com/orgs/crossplane/projects/20/views/9?pane=info From 1a1388fe74ef1c227af5b7eba5e3209ff091e6d4 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Mon, 8 Apr 2024 03:05:05 +0300 Subject: [PATCH 096/370] Added Github App auth to Renovate bot Signed-off-by: Mehmet Enes --- .github/workflows/renovate.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 50debb587..adcd35dbc 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -22,6 +22,12 @@ jobs: !github.event.repository.fork && !github.event.pull_request.head.repo.fork steps: + - name: Get token + id: get-github-app-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.RENOVATE_GITHUB_APP_ID }} + private-key: ${{ secrets.RENOVATE_GITHUB_APP_PRIVATE_KEY }} - name: Checkout uses: actions/checkout@v4.1.1 # Don't waste time starting Renovate if JSON is invalid @@ -31,6 +37,8 @@ jobs: uses: renovatebot/github-action@v40.1.5 env: RENOVATE_REPOSITORIES: ${{ github.repository }} + # Use GitHub API to create commits + RENOVATE_PLATFORM_COMMIT: "true" with: configurationFile: .github/renovate.json5 - token: ${{ secrets.RENOVATE_TOKEN }} + token: '${{ steps.get-github-app-token.outputs.token }}' From c818f6c8c9e08f67fb6eaa5e0dcfd4382c89540f Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Mon, 8 Apr 2024 13:28:29 +0200 Subject: [PATCH 097/370] Update community meeting schedule Signed-off-by: Jared Watts --- README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 6454e3e8d..723f28ebe 100644 --- a/README.md +++ b/README.md @@ -62,10 +62,13 @@ for more information on how you can help. * Join our regular community meetings. * Provide feedback on our [roadmap and releases board]. -The Crossplane community meeting takes place every other [Thursday at 10:00am -Pacific Time][community meeting time]. Anyone who wants to discuss the direction -of the project, design and implementation reviews, or raise general questions -with the broader community is encouraged to join. +The Crossplane community meeting takes place every 4 weeks on [Thursday at +10:00am Pacific Time][community meeting time]. You can find the up to date +meeting schedule on the [Community Calendar][community calendar]. + +Anyone who wants to discuss the direction of the project, design and +implementation reviews, or raise general questions with the broader community is +encouraged to join. 
* Meeting link: * [Current agenda and past meeting notes] From 36f0c56268d9b9da2c4dfb586d6c348899311be7 Mon Sep 17 00:00:00 2001 From: Bob Haddleton Date: Thu, 4 Apr 2024 13:54:09 -0500 Subject: [PATCH 098/370] fix(sa): Merge image pull secrets created by other controllers Signed-off-by: Bob Haddleton --- .../pkg/revision/runtime_function.go | 2 +- .../pkg/revision/runtime_function_test.go | 50 +++++++++++++++++++ .../pkg/revision/runtime_provider.go | 23 ++++++++- .../pkg/revision/runtime_provider_test.go | 50 +++++++++++++++++++ 4 files changed, 123 insertions(+), 2 deletions(-) diff --git a/internal/controller/pkg/revision/runtime_function.go b/internal/controller/pkg/revision/runtime_function.go index b0bed8439..804e0203c 100644 --- a/internal/controller/pkg/revision/runtime_function.go +++ b/internal/controller/pkg/revision/runtime_function.go @@ -137,7 +137,7 @@ func (h *FunctionHooks) Post(ctx context.Context, pkg runtime.Object, pr v1.Pack // `deploymentTemplate.spec.template.spec.serviceAccountName` in the // DeploymentRuntimeConfig. if sa.Name == d.Spec.Template.Spec.ServiceAccountName { - if err := h.client.Apply(ctx, sa); err != nil { + if err := applySA(ctx, h.client, sa); err != nil { return errors.Wrap(err, errApplyFunctionSA) } } diff --git a/internal/controller/pkg/revision/runtime_function_test.go b/internal/controller/pkg/revision/runtime_function_test.go index 87cd9e39e..0c9f5f73f 100644 --- a/internal/controller/pkg/revision/runtime_function_test.go +++ b/internal/controller/pkg/revision/runtime_function_test.go @@ -398,6 +398,56 @@ func TestFunctionPostHook(t *testing.T) { }, }, }, + "SuccessWithExtraSecret": { + reason: "Should not return error if successfully applied service account with additional secret.", + args: args{ + pkg: &pkgmetav1beta1.Function{}, + rev: &v1beta1.FunctionRevision{ + Spec: v1beta1.FunctionRevisionSpec{ + PackageRevisionSpec: v1.PackageRevisionSpec{ + Package: functionImage, + DesiredState: v1.PackageRevisionActive, + }, + }, + }, + manifests: &MockManifestBuilder{ + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { + return &corev1.ServiceAccount{} + }, + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { + return &appsv1.Deployment{} + }, + }, + client: &test.MockClient{ + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { + if sa, ok := obj.(*corev1.ServiceAccount); ok { + sa.ImagePullSecrets = []corev1.LocalObjectReference{{Name: "test_secret"}} + } + return nil + }, + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { + if d, ok := obj.(*appsv1.Deployment); ok { + d.Status.Conditions = []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + }} + return nil + } + return nil + }, + }, + }, + want: want{ + rev: &v1beta1.FunctionRevision{ + Spec: v1beta1.FunctionRevisionSpec{ + PackageRevisionSpec: v1.PackageRevisionSpec{ + Package: functionImage, + DesiredState: v1.PackageRevisionActive, + }, + }, + }, + }, + }, "SuccessfulWithExternallyManagedSA": { reason: "Should be successful without creating an SA, when the SA is managed externally", args: args{ diff --git a/internal/controller/pkg/revision/runtime_provider.go b/internal/controller/pkg/revision/runtime_provider.go index 0be6d63c1..151309a49 100644 --- a/internal/controller/pkg/revision/runtime_provider.go +++ b/internal/controller/pkg/revision/runtime_provider.go @@ -24,6 +24,7 @@ import ( appsv1 
"k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -154,7 +155,7 @@ func (h *ProviderHooks) Post(ctx context.Context, pkg runtime.Object, pr v1.Pack // `deploymentTemplate.spec.template.spec.serviceAccountName` in the // DeploymentRuntimeConfig. if sa.Name == d.Spec.Template.Spec.ServiceAccountName { - if err := h.client.Apply(ctx, sa); err != nil { + if err := applySA(ctx, h.client, sa); err != nil { return errors.Wrap(err, errApplyProviderSA) } } @@ -292,3 +293,23 @@ func getProviderImage(pm *pkgmetav1.Provider, pr v1.PackageRevisionWithRuntime, return ref.Name(), nil } + +// applySA creates/updates a ServiceAccount and includes any image pull secrets +// that have been added by external controllers. +func applySA(ctx context.Context, cl resource.ClientApplicator, sa *corev1.ServiceAccount) error { + oldSa := &corev1.ServiceAccount{} + if err := cl.Get(ctx, types.NamespacedName{Name: sa.Name, Namespace: sa.Namespace}, oldSa); err == nil { + // Add pull secrets created by other controllers + existingSecrets := make(map[string]bool) + for _, secret := range sa.ImagePullSecrets { + existingSecrets[secret.Name] = true + } + + for _, secret := range oldSa.ImagePullSecrets { + if !existingSecrets[secret.Name] { + sa.ImagePullSecrets = append(sa.ImagePullSecrets, secret) + } + } + } + return cl.Apply(ctx, sa) +} diff --git a/internal/controller/pkg/revision/runtime_provider_test.go b/internal/controller/pkg/revision/runtime_provider_test.go index 467c99f36..5e2bb0253 100644 --- a/internal/controller/pkg/revision/runtime_provider_test.go +++ b/internal/controller/pkg/revision/runtime_provider_test.go @@ -468,6 +468,56 @@ func TestProviderPostHook(t *testing.T) { }, }, }, + "SuccessWithExtraSecret": { + reason: "Should not return error if successfully applied service account with additional secret.", + args: args{ + pkg: &pkgmetav1.Provider{}, + rev: &v1.ProviderRevision{ + Spec: v1.ProviderRevisionSpec{ + PackageRevisionSpec: v1.PackageRevisionSpec{ + Package: providerImage, + DesiredState: v1.PackageRevisionActive, + }, + }, + }, + manifests: &MockManifestBuilder{ + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { + return &corev1.ServiceAccount{} + }, + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { + return &appsv1.Deployment{} + }, + }, + client: &test.MockClient{ + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { + if sa, ok := obj.(*corev1.ServiceAccount); ok { + sa.ImagePullSecrets = []corev1.LocalObjectReference{{Name: "test_secret"}} + } + return nil + }, + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { + if d, ok := obj.(*appsv1.Deployment); ok { + d.Status.Conditions = []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + }} + return nil + } + return nil + }, + }, + }, + want: want{ + rev: &v1.ProviderRevision{ + Spec: v1.ProviderRevisionSpec{ + PackageRevisionSpec: v1.PackageRevisionSpec{ + Package: providerImage, + DesiredState: v1.PackageRevisionActive, + }, + }, + }, + }, + }, "SuccessfulWithExternallyManagedSA": { reason: "Should be successful without creating an SA, when the SA is managed externally", args: args{ From 6cbdd0a37961472d90233f6dbe148f63cbffc45a Mon Sep 17 00:00:00 2001 From: Joao Boto Date: Tue, 9 Apr 2024 11:53:49 
+0200 Subject: [PATCH 099/370] Add ToAdler32 to CRDs Signed-off-by: Joao Boto --- apis/apiextensions/v1/composition_transforms.go | 3 ++- .../v1beta1/zz_generated.composition_transforms.go | 3 ++- ...xtensions.crossplane.io_compositionrevisions.yaml | 12 ++++++++++++ .../apiextensions.crossplane.io_compositions.yaml | 6 ++++++ 4 files changed, 22 insertions(+), 2 deletions(-) diff --git a/apis/apiextensions/v1/composition_transforms.go b/apis/apiextensions/v1/composition_transforms.go index 6a31c0204..a972e58b9 100644 --- a/apis/apiextensions/v1/composition_transforms.go +++ b/apis/apiextensions/v1/composition_transforms.go @@ -374,8 +374,9 @@ type StringTransform struct { // `ToJson` converts any input value into its raw JSON representation. // `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input // converted to JSON. + // `ToAdler32` generate a addler32 hash based on the input string. // +optional - // +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512 + // +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512;ToAdler32 Convert *StringConversionType `json:"convert,omitempty"` // Trim the prefix or suffix from the input diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go b/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go index 6c6847c2f..c4e4a9406 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go @@ -376,8 +376,9 @@ type StringTransform struct { // `ToJson` converts any input value into its raw JSON representation. // `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input // converted to JSON. + // `ToAdler32` generate a addler32 hash based on the input string. // +optional - // +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512 + // +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512;ToAdler32 Convert *StringConversionType `json:"convert,omitempty"` // Trim the prefix or suffix from the input diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index 26115fe61..b2996800d 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -462,6 +462,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -471,6 +472,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -875,6 +877,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -884,6 +887,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -1371,6 +1375,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. 
+ `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -1380,6 +1385,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -2042,6 +2048,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -2051,6 +2058,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -2455,6 +2463,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -2464,6 +2473,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -2951,6 +2961,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -2960,6 +2971,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index 49a7eeb21..0a0b8cdff 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -457,6 +457,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -466,6 +467,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -870,6 +872,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -879,6 +882,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -1366,6 +1370,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. 
enum: - ToUpper - ToLower @@ -1375,6 +1380,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- From 1b00a18d16d1499995c88f8c4387b55a73438353 Mon Sep 17 00:00:00 2001 From: santhoshivan23 Date: Tue, 9 Apr 2024 22:59:25 +0530 Subject: [PATCH 100/370] cli: implement tilde expansion for validate Signed-off-by: santhoshivan23 --- cmd/crank/beta/validate/cmd.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmd/crank/beta/validate/cmd.go b/cmd/crank/beta/validate/cmd.go index 1e376fdd8..f684ffd57 100644 --- a/cmd/crank/beta/validate/cmd.go +++ b/cmd/crank/beta/validate/cmd.go @@ -20,6 +20,7 @@ package validate import ( "os" "path/filepath" + "strings" "github.com/alecthomas/kong" "github.com/spf13/afero" @@ -117,6 +118,11 @@ func (c *Cmd) Run(k *kong.Context, _ logging.Logger) error { c.CacheDir = filepath.Join(currentPath, c.CacheDir) } + if strings.HasPrefix(c.CacheDir, "~/") { + homeDir, _ := os.UserHomeDir() + c.CacheDir = filepath.Join(homeDir, c.CacheDir[2:]) + } + m := NewManager(c.CacheDir, c.fs, k.Stdout) // Convert XRDs/CRDs to CRDs and add package dependencies From 068648fd666ea2294d3a9f86fca3511c077e3099 Mon Sep 17 00:00:00 2001 From: Jiri Tyr Date: Thu, 11 Apr 2024 00:38:13 +0100 Subject: [PATCH 101/370] Adding support for Functions in crank validate (fixes #5491) Signed-off-by: Jiri Tyr --- cmd/crank/beta/validate/manager.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/cmd/crank/beta/validate/manager.go b/cmd/crank/beta/validate/manager.go index 8a54127fa..9d30dc397 100644 --- a/cmd/crank/beta/validate/manager.go +++ b/cmd/crank/beta/validate/manager.go @@ -191,13 +191,17 @@ func (m *Manager) addDependencies() error { deps := cfg.Spec.MetaSpec.DependsOn for _, dep := range deps { image := "" - if dep.Configuration != nil { + if dep.Configuration != nil { //nolint:gocritic // switch is not suitable here image = *dep.Configuration } else if dep.Provider != nil { image = *dep.Provider + } else if dep.Function != nil { + image = *dep.Function + } + if len(image) > 0 { + image = fmt.Sprintf(imageFmt, image, dep.Version) + m.deps[image] = true } - image = fmt.Sprintf(imageFmt, image, dep.Version) - m.deps[image] = true } } From 5c2d7f5d35ae64c347c2f3afe5d6f3ac14f5daa7 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 11 Apr 2024 11:21:00 +0000 Subject: [PATCH 102/370] chore(deps): pin dependencies --- .github/workflows/renovate.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index adcd35dbc..d50fe70f1 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -24,17 +24,17 @@ jobs: steps: - name: Get token id: get-github-app-token - uses: actions/create-github-app-token@v1 + uses: actions/create-github-app-token@7bfa3a4717ef143a604ee0a99d859b8886a96d00 # v1 with: app-id: ${{ secrets.RENOVATE_GITHUB_APP_ID }} private-key: ${{ secrets.RENOVATE_GITHUB_APP_PRIVATE_KEY }} - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 # Don't waste time starting Renovate if JSON is invalid - name: Validate Renovate JSON run: npx --yes --package renovate -- renovate-config-validator - name: Self-hosted Renovate - uses: renovatebot/github-action@v40.1.5 + uses: renovatebot/github-action@89bd050bafa5a15de5d9383e3129edf210422004 # v40.1.5 env: RENOVATE_REPOSITORIES: 
${{ github.repository }} # Use GitHub API to create commits From cf3885087cb47aefee892dffb8a5dfadb21405a5 Mon Sep 17 00:00:00 2001 From: Bob Haddleton Date: Tue, 16 Apr 2024 14:11:09 -0500 Subject: [PATCH 103/370] Add functions to init container for helm chart deployment Signed-off-by: Bob Haddleton --- cluster/charts/crossplane/README.md | 1 + .../crossplane/templates/deployment.yaml | 4 + cluster/charts/crossplane/values.yaml | 4 + cmd/crossplane/core/init.go | 3 +- internal/initializer/installer.go | 32 +++++- internal/initializer/installer_test.go | 101 +++++++++++++++++- 6 files changed, 139 insertions(+), 6 deletions(-) diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 87a619f7b..63a11ff37 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -77,6 +77,7 @@ and their default values. | `extraObjects` | To add arbitrary Kubernetes Objects during a Helm Install | `[]` | | `extraVolumeMountsCrossplane` | Add custom `volumeMounts` to the Crossplane pod. | `{}` | | `extraVolumesCrossplane` | Add custom `volumes` to the Crossplane pod. | `{}` | +| `function.packages` | A list of Function packages to install | `[]` | | `hostNetwork` | Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet`. | `false` | | `image.pullPolicy` | The image pull policy used for Crossplane and RBAC Manager pods. | `"IfNotPresent"` | | `image.repository` | Repository for the Crossplane pod image. | `"xpkg.upbound.io/crossplane/crossplane"` | diff --git a/cluster/charts/crossplane/templates/deployment.yaml b/cluster/charts/crossplane/templates/deployment.yaml index d6e31a7a3..057e30e81 100644 --- a/cluster/charts/crossplane/templates/deployment.yaml +++ b/cluster/charts/crossplane/templates/deployment.yaml @@ -59,6 +59,10 @@ spec: - --configuration - "{{ $arg }}" {{- end }} + {{- range $arg := .Values.function.packages }} + - --function + - "{{ $arg }}" + {{- end }} imagePullPolicy: {{ .Values.image.pullPolicy }} name: {{ .Chart.Name }}-init resources: diff --git a/cluster/charts/crossplane/values.yaml b/cluster/charts/crossplane/values.yaml index 971dae262..96e2f7976 100755 --- a/cluster/charts/crossplane/values.yaml +++ b/cluster/charts/crossplane/values.yaml @@ -51,6 +51,10 @@ configuration: # -- A list of Configuration packages to install. packages: [] +function: + # -- A list of Function packages to install + packages: [] + # -- The imagePullSecret names to add to the Crossplane ServiceAccount. imagePullSecrets: {} diff --git a/cmd/crossplane/core/init.go b/cmd/crossplane/core/init.go index aefb001cb..f694ada56 100644 --- a/cmd/crossplane/core/init.go +++ b/cmd/crossplane/core/init.go @@ -36,6 +36,7 @@ import ( type initCommand struct { Providers []string `help:"Pre-install a Provider by giving its image URI. This argument can be repeated." name:"provider"` Configurations []string `help:"Pre-install a Configuration by giving its image URI. This argument can be repeated." name:"configuration"` + Functions []string `help:"Pre-install a Function by giving its image URI. This argument can be repeated." name:"function"` Namespace string `default:"crossplane-system" env:"POD_NAMESPACE" help:"Namespace used to set as default scope in default secret store config." 
short:"n"` ServiceAccount string `default:"crossplane" env:"POD_SERVICE_ACCOUNT" help:"Name of the Crossplane Service Account."` @@ -101,7 +102,7 @@ func (c *initCommand) Run(s *runtime.Scheme, log logging.Logger) error { } steps = append(steps, initializer.NewLockObject(), - initializer.NewPackageInstaller(c.Providers, c.Configurations), + initializer.NewPackageInstaller(c.Providers, c.Configurations, c.Functions), initializer.NewStoreConfigObject(c.Namespace), initializer.StepFunc(initializer.DefaultDeploymentRuntimeConfig), ) diff --git a/internal/initializer/installer.go b/internal/initializer/installer.go index 542c9dca6..fd18584d3 100644 --- a/internal/initializer/installer.go +++ b/internal/initializer/installer.go @@ -27,21 +27,24 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" v1 "github.com/crossplane/crossplane/apis/pkg/v1" + "github.com/crossplane/crossplane/apis/pkg/v1beta1" "github.com/crossplane/crossplane/internal/xpkg" ) const ( errListProviders = "failed getting provider list" errListConfigurations = "failed getting configuration list" + errListFunctions = "failed getting function list" errParsePackageName = "package name is not valid" errApplyPackage = "cannot apply package" ) // NewPackageInstaller returns a new package installer. -func NewPackageInstaller(p []string, c []string) *PackageInstaller { +func NewPackageInstaller(p []string, c []string, f []string) *PackageInstaller { return &PackageInstaller{ providers: p, configurations: c, + functions: f, } } @@ -49,12 +52,13 @@ func NewPackageInstaller(p []string, c []string) *PackageInstaller { type PackageInstaller struct { configurations []string providers []string + functions []string } // Run makes sure all specified packages exist. func (pi *PackageInstaller) Run(ctx context.Context, kube client.Client) error { - pkgs := make([]client.Object, len(pi.providers)+len(pi.configurations)) - // NOTE(hasheddan): we build maps of existing Provider and Configuration + pkgs := make([]client.Object, len(pi.providers)+len(pi.configurations)+len(pi.functions)) + // NOTE(hasheddan): we build maps of existing Provider, Configuration and Function // sources to the package names such that we can update the version when a // package specified for install matches the source of an existing package. pl := &v1.ProviderList{} @@ -86,8 +90,20 @@ func (pi *PackageInstaller) Run(ctx context.Context, kube client.Client) error { } cMap[xpkg.ParsePackageSourceFromReference(ref)] = c.GetName() } + fl := &v1beta1.FunctionList{} + if err := kube.List(ctx, fl); err != nil && !kerrors.IsNotFound(err) { + return errors.Wrap(err, errListFunctions) + } + fMap := make(map[string]string, len(fl.Items)) + for _, f := range fl.Items { + ref, err := name.ParseReference(f.GetSource(), name.WithDefaultRegistry("")) + if err != nil { + continue + } + fMap[xpkg.ParsePackageSourceFromReference(ref)] = f.GetName() + } // NOTE(hasheddan): we maintain a separate index from the range so that - // Providers and Configurations can be added to the same slice for applying. + // Providers, Configurations and Functions can be added to the same slice for applying. 
pkgsIdx := 0 for _, img := range pi.providers { p := &v1.Provider{} @@ -105,6 +121,14 @@ func (pi *PackageInstaller) Run(ctx context.Context, kube client.Client) error { pkgs[pkgsIdx] = c pkgsIdx++ } + for _, img := range pi.functions { + f := &v1beta1.Function{} + if err := buildPack(f, img, fMap); err != nil { + return err + } + pkgs[pkgsIdx] = f + pkgsIdx++ + } pa := resource.NewAPIPatchingApplicator(kube) for _, p := range pkgs { if err := pa.Apply(ctx, p); err != nil { diff --git a/internal/initializer/installer_test.go b/internal/initializer/installer_test.go index 0c7f246cf..e27555101 100644 --- a/internal/initializer/installer_test.go +++ b/internal/initializer/installer_test.go @@ -31,6 +31,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/pkg/v1" + "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) const ( @@ -40,6 +41,9 @@ const ( errFmtGetConfiguration = "unexpected name in configuration get: %s" errFmtPatchConfiguration = "unexpected name in configuration update: %s" errFmtPatchConfigurationSource = "unexpected source in configuration update: %s" + errFmtGetFunction = "unexpected name in function get: %s" + errFmtPatchFunction = "unexpected name in function update: %s" + errFmtPatchFunctionSource = "unexpected source in function update: %s" ) var errBoom = errors.New("boom") @@ -53,9 +57,15 @@ func TestInstaller(t *testing.T) { c1 := "crossplane/getting-started-aws:v0.0.1" c1Repo := "crossplane/getting-started-aws" c1Name := "crossplane-getting-started-aws" + f1Existing := "existing-function" + f1 := "crossplane/function-auto-ready:v0.0.1" + f1Repo := "crossplane/function-auto-ready" + f1Name := "crossplane-function-auto-ready" + type args struct { p []string c []string + f []string kube client.Client } type want struct { @@ -69,6 +79,7 @@ func TestInstaller(t *testing.T) { args: args{ p: []string{p1}, c: []string{c1}, + f: []string{f1}, kube: &test.MockClient{ MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { switch l := list.(type) { @@ -102,6 +113,21 @@ func TestInstaller(t *testing.T) { }, }, } + case *v1beta1.FunctionList: + *l = v1beta1.FunctionList{ + Items: []v1beta1.Function{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: f1Name, + }, + Spec: v1beta1.FunctionSpec{ + PackageSpec: v1.PackageSpec{ + Package: f1, + }, + }, + }, + }, + } default: t.Errorf("unexpected type") } @@ -117,6 +143,10 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf(errFmtGetConfiguration, key.Name) } + case *v1beta1.Function: + if key.Name != f1Name { + t.Errorf(errFmtGetFunction, key.Name) + } default: t.Errorf("unexpected type") } @@ -132,6 +162,10 @@ func TestInstaller(t *testing.T) { if obj.GetName() != c1Name { t.Errorf(errFmtPatchConfiguration, obj.GetName()) } + case *v1beta1.Function: + if obj.GetName() != f1Name { + t.Errorf(errFmtPatchFunction, obj.GetName()) + } default: t.Errorf("unexpected type") } @@ -144,6 +178,7 @@ func TestInstaller(t *testing.T) { args: args{ p: []string{p1}, c: []string{c1}, + f: []string{f1}, kube: &test.MockClient{ MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { switch l := list.(type) { @@ -177,6 +212,21 @@ func TestInstaller(t *testing.T) { }, }, } + case *v1beta1.FunctionList: + *l = v1beta1.FunctionList{ + Items: []v1beta1.Function{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: f1Existing, + }, + Spec: v1beta1.FunctionSpec{ + PackageSpec: v1.PackageSpec{ + Package: fmt.Sprintf("%s:%s", f1Repo, 
"v100.100.100"), + }, + }, + }, + }, + } default: t.Errorf("unexpected type") } @@ -192,6 +242,10 @@ func TestInstaller(t *testing.T) { if key.Name != c1Existing { t.Errorf(errFmtGetConfiguration, key.Name) } + case *v1beta1.Function: + if key.Name != f1Existing { + t.Errorf(errFmtGetFunction, key.Name) + } default: t.Errorf("unexpected type") } @@ -213,6 +267,13 @@ func TestInstaller(t *testing.T) { if o.GetSource() != c1 { t.Errorf(errFmtPatchConfigurationSource, o.GetSource()) } + case *v1beta1.Function: + if o.GetName() != f1Existing { + t.Errorf(errFmtPatchFunction, o.GetName()) + } + if o.GetSource() != f1 { + t.Errorf(errFmtPatchFunctionSource, o.GetSource()) + } default: t.Errorf("unexpected type") } @@ -225,6 +286,7 @@ func TestInstaller(t *testing.T) { args: args{ p: []string{p1}, c: []string{c1}, + f: []string{f1}, kube: &test.MockClient{ MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil @@ -239,6 +301,10 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf(errFmtGetConfiguration, key.Name) } + case *v1beta1.Function: + if key.Name != f1Name { + t.Errorf(errFmtGetFunction, key.Name) + } default: t.Errorf("unexpected type") } @@ -254,6 +320,7 @@ func TestInstaller(t *testing.T) { args: args{ p: []string{p1}, c: []string{c1}, + f: []string{f1}, kube: &test.MockClient{ MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { switch l := list.(type) { @@ -284,6 +351,31 @@ func TestInstaller(t *testing.T) { } case *v1.ConfigurationList: return nil + case *v1beta1.FunctionList: + *l = v1beta1.FunctionList{ + Items: []v1beta1.Function{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "other-function", + }, + Spec: v1beta1.FunctionSpec{ + PackageSpec: v1.PackageSpec{ + Package: fmt.Sprintf("%s:%s", "other-repo", "v100.100.100"), + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "another-function", + }, + Spec: v1beta1.FunctionSpec{ + PackageSpec: v1.PackageSpec{ + Package: "preloaded-source", + }, + }, + }, + }, + } default: t.Errorf("unexpected type") } @@ -299,6 +391,10 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf(errFmtGetConfiguration, key.Name) } + case *v1beta1.Function: + if key.Name != f1Name { + t.Errorf(errFmtGetFunction, key.Name) + } default: t.Errorf("unexpected type") } @@ -327,6 +423,8 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf("unexpected name in configuration apply") } + case *v1beta1.Function: + t.Errorf("no functions specified") default: t.Errorf("unexpected type") } @@ -342,6 +440,7 @@ func TestInstaller(t *testing.T) { args: args{ p: []string{p1}, c: []string{c1}, + f: []string{f1}, kube: &test.MockClient{ MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil @@ -358,7 +457,7 @@ func TestInstaller(t *testing.T) { } for name, tc := range cases { t.Run(name, func(t *testing.T) { - i := NewPackageInstaller(tc.args.p, tc.args.c) + i := NewPackageInstaller(tc.args.p, tc.args.c, tc.args.f) err := i.Run(context.TODO(), tc.args.kube) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("\n%s\nRun(...): -want err, +got err:\n%s", name, diff) From 093964d45f4e09364982a67024397e2e2e611534 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Wed, 17 Apr 2024 10:06:15 +0100 Subject: [PATCH 104/370] ci: renovate postUpgradeTasks on update Signed-off-by: Philippe Scorsolini --- .github/renovate.json5 | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 0233d7fc6..b90eda7f4 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -137,6 +137,6 @@ "make generate" ], fileFilters: ["**/*"], - executionMode: "branch", + executionMode: "update", } } From e37c99741ae5eb7414cbc74b869f3e6cf5571eb1 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Wed, 17 Apr 2024 10:35:59 +0100 Subject: [PATCH 105/370] ci: allow make generate post upgrade command Signed-off-by: Philippe Scorsolini --- .github/workflows/renovate.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index d50fe70f1..28af4367e 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -39,6 +39,7 @@ jobs: RENOVATE_REPOSITORIES: ${{ github.repository }} # Use GitHub API to create commits RENOVATE_PLATFORM_COMMIT: "true" + RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^make generate$"]' with: configurationFile: .github/renovate.json5 token: '${{ steps.get-github-app-token.outputs.token }}' From fcbdb48515e81544de7915b044860a5a8d32f9e1 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Wed, 17 Apr 2024 09:39:44 +0000 Subject: [PATCH 106/370] chore(deps): update docker/login-action digest to e92390c --- .github/workflows/ci.yml | 4 ++-- .github/workflows/promote.yml | 4 ++-- .github/workflows/scan.yaml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 440be3109..cd6e94833 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -388,14 +388,14 @@ jobs: path: _output/** - name: Login to DockerHub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 if: env.DOCKER_USR != '' with: username: ${{ secrets.DOCKER_USR }} password: ${{ secrets.DOCKER_PSW }} - name: Login to Upbound - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 if: env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' with: registry: xpkg.upbound.io diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index fec514641..5b14a2cf2 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -49,14 +49,14 @@ jobs: run: git fetch --prune --unshallow - name: Login to DockerHub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 if: env.DOCKER_USR != '' with: username: ${{ secrets.DOCKER_USR }} password: ${{ secrets.DOCKER_PSW }} - name: Login to Upbound - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 if: env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' with: registry: xpkg.upbound.io diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index c39490e9f..83ccbb796 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -99,7 +99,7 @@ jobs: # we log to DockerHub to avoid rate limiting - name: Login To DockerHub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 if: env.DOCKER_USR != '' with: username: ${{ secrets.DOCKER_USR }} From 
962f92faf76286db20ef2216c751b763f1a6ef66 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Wed, 17 Apr 2024 10:41:28 +0100 Subject: [PATCH 107/370] ci: pull submodules also when running renovate Signed-off-by: Philippe Scorsolini --- .github/workflows/renovate.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 28af4367e..64d0d38a5 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -30,6 +30,8 @@ jobs: private-key: ${{ secrets.RENOVATE_GITHUB_APP_PRIVATE_KEY }} - name: Checkout uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + submodules: true # Don't waste time starting Renovate if JSON is invalid - name: Validate Renovate JSON run: npx --yes --package renovate -- renovate-config-validator From 8bcfb14689aabe7bf8824ac381382a56c9ebab8e Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Wed, 17 Apr 2024 10:53:14 +0100 Subject: [PATCH 108/370] ci: init submodules also as renovate postupgradecommand Signed-off-by: Philippe Scorsolini --- .github/renovate.json5 | 1 + .github/workflows/renovate.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index b90eda7f4..457c7909f 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -134,6 +134,7 @@ postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. "commands": [ + "git submodule update --init", "make generate" ], fileFilters: ["**/*"], diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 64d0d38a5..4b3d4f405 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -41,7 +41,7 @@ jobs: RENOVATE_REPOSITORIES: ${{ github.repository }} # Use GitHub API to create commits RENOVATE_PLATFORM_COMMIT: "true" - RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^make generate$"]' + RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^git submodule update --init$", "^make generate$"]' with: configurationFile: .github/renovate.json5 token: '${{ steps.get-github-app-token.outputs.token }}' From d70ac09c5f85273f29ba664ea05565c01f11d0c3 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Wed, 17 Apr 2024 09:58:33 +0000 Subject: [PATCH 109/370] fix(deps): update module google.golang.org/protobuf to v1.33.0 [security] --- ...extensions.crossplane.io_compositeresourcedefinitions.yaml | 1 + .../apiextensions.crossplane.io_compositionrevisions.yaml | 1 + cluster/crds/apiextensions.crossplane.io_compositions.yaml | 1 + .../crds/apiextensions.crossplane.io_environmentconfigs.yaml | 1 + cluster/crds/apiextensions.crossplane.io_usages.yaml | 1 + cluster/crds/pkg.crossplane.io_configurationrevisions.yaml | 1 + cluster/crds/pkg.crossplane.io_configurations.yaml | 1 + cluster/crds/pkg.crossplane.io_controllerconfigs.yaml | 1 + cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml | 1 + cluster/crds/pkg.crossplane.io_functionrevisions.yaml | 1 + cluster/crds/pkg.crossplane.io_functions.yaml | 1 + cluster/crds/pkg.crossplane.io_locks.yaml | 1 + cluster/crds/pkg.crossplane.io_providerrevisions.yaml | 1 + cluster/crds/pkg.crossplane.io_providers.yaml | 1 + cluster/crds/secrets.crossplane.io_storeconfigs.yaml | 1 + go.mod | 2 +- go.sum | 4 ++-- 17 files changed, 18 insertions(+), 3 deletions(-) diff --git a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml 
b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml index 8bb056fd0..21564a4a3 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index b2996800d..d400d40e2 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index 0a0b8cdff..30ca8f6b9 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml index 1d80d90b2..03db70ad1 100644 --- a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml +++ b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_usages.yaml b/cluster/crds/apiextensions.crossplane.io_usages.yaml index b4a4cc072..67d75b3f1 100644 --- a/cluster/crds/apiextensions.crossplane.io_usages.yaml +++ b/cluster/crds/apiextensions.crossplane.io_usages.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml index ed34a2b54..06c288c87 100644 --- a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_configurations.yaml b/cluster/crds/pkg.crossplane.io_configurations.yaml index 6bc35a71e..feb414313 100644 --- a/cluster/crds/pkg.crossplane.io_configurations.yaml +++ b/cluster/crds/pkg.crossplane.io_configurations.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml index 3cee9ece8..63c87485d 100644 --- a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml index 7b80a697f..e01462654 100644 --- a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git 
a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml index 4695ed512..8a1d42d73 100644 --- a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_functions.yaml b/cluster/crds/pkg.crossplane.io_functions.yaml index cc895736c..94bc1ff17 100644 --- a/cluster/crds/pkg.crossplane.io_functions.yaml +++ b/cluster/crds/pkg.crossplane.io_functions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_locks.yaml b/cluster/crds/pkg.crossplane.io_locks.yaml index 4daff56a3..b55f731bf 100644 --- a/cluster/crds/pkg.crossplane.io_locks.yaml +++ b/cluster/crds/pkg.crossplane.io_locks.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml index 92ff14575..22a5f8397 100644 --- a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_providers.yaml b/cluster/crds/pkg.crossplane.io_providers.yaml index d18030821..8b394dc44 100644 --- a/cluster/crds/pkg.crossplane.io_providers.yaml +++ b/cluster/crds/pkg.crossplane.io_providers.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml index 989d5fcb4..85ffc16a6 100644 --- a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml +++ b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/go.mod b/go.mod index 3ca664fff..17ea437a8 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( golang.org/x/sync v0.6.0 google.golang.org/grpc v1.61.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.33.0 k8s.io/api v0.29.1 k8s.io/apiextensions-apiserver v0.29.1 k8s.io/apimachinery v0.29.1 diff --git a/go.sum b/go.sum index 0cbaff651..d21a4afd2 100644 --- a/go.sum +++ b/go.sum @@ -658,8 +658,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 2f29ac69841e317a29bb56ddf2e217d1bf64a0bc Mon Sep 17 00:00:00 2001 From: fuyangpengqi <995764973@qq.com> Date: Thu, 18 Apr 2024 21:14:33 +0800 Subject: [PATCH 110/370] chore: fix some typos in comments Signed-off-by: fuyangpengqi <995764973@qq.com> --- design/design-doc-composition-validating-webhook.md | 4 ++-- design/design-doc-provider-strategy.md | 2 +- design/one-pager-composition-environment.md | 2 +- design/one-pager-resource-connectivity-mvp.md | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/design/design-doc-composition-validating-webhook.md b/design/design-doc-composition-validating-webhook.md index 41b5ec555..756db6e25 100644 --- a/design/design-doc-composition-validating-webhook.md +++ b/design/design-doc-composition-validating-webhook.md @@ -208,7 +208,7 @@ unnecessary check in both `strict` and `loose` modes. #### Notes -A few additional notes worth highligting or making more explicit w.r.t. the description above: +A few additional notes worth highlighting or making more explicit w.r.t. the description above: * We identified 3 increasingly complex types of validation, that we will probably introduce in different phases and PRs: @@ -389,7 +389,7 @@ Ideally, the validation logic should be implemented as much as possible keeping in mind that it should be reusable for the following use-cases too: - linter -- langage server +- language server - future webhooks validating resources resulting in Compositions, e.g. Packages This does not mean that an initial implementation should be structured as a diff --git a/design/design-doc-provider-strategy.md b/design/design-doc-provider-strategy.md index 35735f7ff..256737162 100644 --- a/design/design-doc-provider-strategy.md +++ b/design/design-doc-provider-strategy.md @@ -472,7 +472,7 @@ From resource definition perspective, each cloud deserves its own summary: * Google TF uses DCL whenever possible already and the GCP API is already resource-based. * There isn't much discrepancy here. -* AWS TF seems to have generally been followin CloudFormation which is powered +* AWS TF seems to have generally been following CloudFormation which is powered by Cloud Control. * There are exceptions though, so one needs to check Cloud Control Registry. * Azure TF uses Azure SDK mostly and Azure API is already resource-based. diff --git a/design/one-pager-composition-environment.md b/design/one-pager-composition-environment.md index e4ffa9736..082a0278b 100644 --- a/design/one-pager-composition-environment.md +++ b/design/one-pager-composition-environment.md @@ -248,7 +248,7 @@ provider is responsible for implementing and supporting this feature. See https://github.com/crossplane/crossplane/issues/1770 for details. However, the security issues mentioned in [Patch from any Object](#patch-from-any-object) -would occure here as well. +would occur here as well. One could potentially use a managed resource to extract data from a secret within another namespace. diff --git a/design/one-pager-resource-connectivity-mvp.md b/design/one-pager-resource-connectivity-mvp.md index 27ec3fd28..c33c199cc 100644 --- a/design/one-pager-resource-connectivity-mvp.md +++ b/design/one-pager-resource-connectivity-mvp.md @@ -253,7 +253,7 @@ spec: By comparison, a direct translation of the [GKE cluster external resource]'s writable API object fields to a Kubernetes YAML specification would be as follows. 
Note that the GKE API contains several deprecated fields, all of which -are superceded by others (e.g. `nodeConfig` is superceded by `nodePools`). The +are superseded by others (e.g. `nodeConfig` is superseded by `nodePools`). The below translation omits these deprecated fields. ```yaml From 34f31f295a7ef8bb239e41f478dca56bc590c46e Mon Sep 17 00:00:00 2001 From: Carl Henrik Lunde Date: Thu, 18 Apr 2024 21:26:46 +0200 Subject: [PATCH 111/370] feat(crank/trace): get namespace from current context in kubeconfig Use client-go/tools/clientcmd directly to get the kubernetes config, as this allows us to get the namespace. controller-runtime/../config wraps this package but does expose namespace information. Signed-off-by: Carl Henrik Lunde --- cmd/crank/beta/trace/trace.go | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/cmd/crank/beta/trace/trace.go b/cmd/crank/beta/trace/trace.go index 35fc77072..2f2a89a7e 100644 --- a/cmd/crank/beta/trace/trace.go +++ b/cmd/crank/beta/trace/trace.go @@ -28,8 +28,8 @@ import ( "k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" @@ -45,6 +45,7 @@ const ( errGetResource = "cannot get requested resource" errCliOutput = "cannot print output" errKubeConfig = "failed to get kubeconfig" + errKubeNamespace = "failed to get namespace from kubeconfig" errInitKubeClient = "cannot init kubeclient" errGetDiscoveryClient = "cannot get discovery client" errGetMapping = "cannot get mapping for resource" @@ -61,9 +62,8 @@ type Cmd struct { Name string `arg:"" help:"Name of the Crossplane resource, can be passed as part of the resource too." optional:""` // TODO(phisco): add support for all the usual kubectl flags; configFlags := genericclioptions.NewConfigFlags(true).AddFlags(...) - // TODO(phisco): move to namespace defaulting to "" and use the current context's namespace Context string `default:"" help:"Kubernetes context." name:"context" short:"c"` - Namespace string `default:"default" help:"Namespace of the resource." name:"namespace" short:"n"` + Namespace string `default:"" help:"Namespace of the resource." name:"namespace" short:"n"` Output string `default:"default" enum:"default,wide,json,dot" help:"Output format. One of: default, wide, json, dot." name:"output" short:"o"` ShowConnectionSecrets bool `help:"Show connection secrets in the output." name:"show-connection-secrets" short:"s"` ShowPackageDependencies string `default:"unique" enum:"unique,all,none" help:"Show package dependencies in the output. One of: unique, all, none." 
name:"show-package-dependencies"` @@ -114,7 +114,12 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { } logger.Debug("Built printer", "output", c.Output) - kubeconfig, err := config.GetConfigWithContext(c.Context) + clientconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientcmd.NewDefaultClientConfigLoadingRules(), + &clientcmd.ConfigOverrides{CurrentContext: c.Context}, + ) + + kubeconfig, err := clientconfig.ClientConfig() if err != nil { return errors.Wrap(err, errKubeConfig) } @@ -156,10 +161,18 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { APIVersion: mapping.GroupVersionKind.GroupVersion().String(), Name: name, } - if mapping.Scope.Name() == meta.RESTScopeNameNamespace && c.Namespace != "" { - logger.Debug("Requested resource is namespaced", "namespace", c.Namespace) - rootRef.Namespace = c.Namespace + if mapping.Scope.Name() == meta.RESTScopeNameNamespace { + namespace := c.Namespace + if namespace == "" { + namespace, _, err = clientconfig.Namespace() + if err != nil { + return errors.Wrap(err, errKubeNamespace) + } + } + logger.Debug("Requested resource is namespaced", "namespace", namespace) + rootRef.Namespace = namespace } + logger.Debug("Getting resource tree", "rootRef", rootRef.String()) // Get client for k8s package root := resource.GetResource(ctx, client, rootRef) From 8b9791e3989c372558a969022a8a6bedff6036a9 Mon Sep 17 00:00:00 2001 From: Mayank Jha Date: Fri, 19 Apr 2024 09:38:54 -0400 Subject: [PATCH 112/370] Update ADOPTERS.md to include UiPath Signed-off-by: Mayank Jha --- ADOPTERS.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index 31e8b0bf3..701f32897 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -81,4 +81,5 @@ This list is sorted in the order that organizations were added to it. | [Variphy](https://www.variphy.com/) | [info@variphy.com](mailto:info@variphy.com) ([@dmvariphy](https://github.com/dmvariphy) [@nick-variphy](https://github.com/nick-variphy) [@dfalter-variphy](https://github.com/dfalter-variphy) [@zach-variphy](https://github.com/zach-variphy)) | We use Crossplane (via [Upbound Cloud](https://www.upbound.io/)) to manage our development and production infrastructure via GitOps. Crossplane also allows us to provide custom APIs for production Variphy applications to dynamically manage external resources, such as [Confluent Cloud](https://www.confluent.io/) Kafka topics. | | [OneUptime](https://oneuptime.com) | @simlarsen | Builds production and developer environments that power the OneUptime Platform. | | [Xata](https://xata.io) | [@mattfield](https://github.com/mattfield) [@paulaguijarro](https://github.com/paulaguijarro) | Crossplane manages the dev, staging, and production RDS Aurora PostgreSQL clusters for our [Dedicated Clusters](https://xata.io/blog/postgres-dedicated-clusters) offering, along with Flux Kustomizations and other resources that provision cells of internal [Xata](https://xata.io) services. 
| -| [AlphaSense](https://www.alpha-sense.com/) | @abhihendre | Engineering teams at [AlphaSense](https://www.alpha-sense.com/) leverage Crossplane APIs, abstracted by a set of Helm charts and Compositions curated by our Platform Teams, to seamlessly provision cloud services across three major clouds, including our production environment.| \ No newline at end of file +| [AlphaSense](https://www.alpha-sense.com/) | @abhihendre | Engineering teams at [AlphaSense](https://www.alpha-sense.com/) leverage Crossplane APIs, abstracted by a set of Helm charts and Compositions curated by our Platform Teams, to seamlessly provision cloud services across three major clouds, including our production environment.| +| [UiPath](https://www.uipath.com/) | [@mjnovice](https://github.com/mjnovice) | Control plane for infrastructure management which powers [AutomationSuite](https://docs.uipath.com/automation-suite/automation-suite/2023.10/installation-guide-eks-aks/automation-suite-on-eksaks-overview) | From 132075d7b9a6dc85f813608c98e6e3b40618c759 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sat, 20 Apr 2024 08:07:52 +0000 Subject: [PATCH 113/370] chore(deps): update actions/upload-artifact digest to 1746f4a --- .github/workflows/ci.yml | 6 +++--- .github/workflows/scan.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cd6e94833..4cbfa7ed5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -305,7 +305,7 @@ jobs: run: make e2e E2E_TEST_FLAGS="-test.v -test.failfast -fail-fast --kind-logs-location ./logs-kind --test-suite ${{ matrix.test-suite }}" - name: Upload artifacts - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4 + uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4 if: failure() with: name: e2e-kind-logs-${{ matrix.test-suite }} @@ -382,7 +382,7 @@ jobs: BUILD_ARGS: "--load" - name: Publish Artifacts to GitHub - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4 + uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4 with: name: output path: _output/** @@ -446,7 +446,7 @@ jobs: language: go - name: Upload Crash - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4 + uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4 if: failure() && steps.build.outcome == 'success' with: name: artifacts diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index 83ccbb796..c7c4b7765 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -117,7 +117,7 @@ jobs: output: 'trivy-results.sarif' - name: Upload Artifact - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4 + uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4 with: name: trivy-${{ env.escaped_filename }}.sarif path: trivy-results.sarif From eb31dd5b3edbdc1445cf2ec18489819819ec29af Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sat, 20 Apr 2024 08:08:03 +0000 Subject: [PATCH 114/370] chore(deps): update gcr.io/distroless/static docker digest to 6d31326 --- cluster/images/crossplane/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/images/crossplane/Dockerfile b/cluster/images/crossplane/Dockerfile index eab6bd90c..9ae58992e 100644 --- 
a/cluster/images/crossplane/Dockerfile +++ b/cluster/images/crossplane/Dockerfile @@ -1,4 +1,4 @@ -FROM gcr.io/distroless/static@sha256:7e5c6a2a4ae854242874d36171b31d26e0539c98fc6080f942f16b03e82851ab +FROM gcr.io/distroless/static@sha256:6d31326376a7834b106f281b04f67b5d015c31732f594930f2ea81365f99d60c ARG TARGETOS ARG TARGETARCH From 960575d512ac0aff4f25f763ed807ceea633bfd1 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sat, 20 Apr 2024 08:08:09 +0000 Subject: [PATCH 115/370] chore(deps): update github/codeql-action digest to c7f9125 --- .github/workflows/ci.yml | 4 ++-- .github/workflows/scan.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cd6e94833..1d97edd81 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -158,12 +158,12 @@ jobs: run: make vendor vendor.check - name: Initialize CodeQL - uses: github/codeql-action/init@e8893c57a1f3a2b659b6b55564fdfdbbd2982911 # v3 + uses: github/codeql-action/init@c7f9125735019aa87cfc361530512d50ea439c71 # v3 with: languages: go - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@e8893c57a1f3a2b659b6b55564fdfdbbd2982911 # v3 + uses: github/codeql-action/analyze@c7f9125735019aa87cfc361530512d50ea439c71 # v3 trivy-scan-fs: runs-on: ubuntu-22.04 diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index 83ccbb796..548718cdf 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -124,7 +124,7 @@ jobs: retention-days: 3 - name: Upload Trivy Scan Results To GitHub Security Tab - uses: github/codeql-action/upload-sarif@e8893c57a1f3a2b659b6b55564fdfdbbd2982911 # v3 + uses: github/codeql-action/upload-sarif@c7f9125735019aa87cfc361530512d50ea439c71 # v3 with: sarif_file: 'trivy-results.sarif' category: ${{ matrix.image }}:${{ env.tag }} From 045445bbfb73939405d582cdc1c108a8b15447e4 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sat, 20 Apr 2024 08:08:35 +0000 Subject: [PATCH 116/370] chore(deps): update aquasecurity/trivy-action action to v0.19.0 --- .github/workflows/ci.yml | 2 +- .github/workflows/scan.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cd6e94833..101da2ebd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -176,7 +176,7 @@ jobs: submodules: true - name: Run Trivy vulnerability scanner in fs mode - uses: aquasecurity/trivy-action@84384bd6e777ef152729993b8145ea352e9dd3ef # 0.17.0 + uses: aquasecurity/trivy-action@d710430a6722f083d3b36b8339ff66b32f22ee55 # 0.19.0 with: scan-type: 'fs' ignore-unfixed: true diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index 83ccbb796..6b2bb144c 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -110,7 +110,7 @@ jobs: run: docker pull ${{ matrix.image }}:${{ env.tag }} - name: Run Trivy Vulnerability Scanner - uses: aquasecurity/trivy-action@84384bd6e777ef152729993b8145ea352e9dd3ef # 0.17.0 + uses: aquasecurity/trivy-action@d710430a6722f083d3b36b8339ff66b32f22ee55 # 0.19.0 with: image-ref: ${{ matrix.image }}:${{ env.tag }} format: 'sarif' From 4476d89d2e234b76fa27ca9de60170cf0964152e Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Sat, 20 Apr 2024 09:25:39 +0100 Subject: [PATCH 117/370] fix: properly setup go before running make generate 
Signed-off-by: Philippe Scorsolini --- .github/workflows/renovate.yml | 43 +++++++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 4b3d4f405..d399ff0f4 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -15,6 +15,11 @@ on: type: string schedule: - cron: '0 8 * * *' + +env: + # Common versions + GO_VERSION: '1.22.0' + jobs: renovate: runs-on: ubuntu-latest @@ -22,19 +27,45 @@ jobs: !github.event.repository.fork && !github.event.pull_request.head.repo.fork steps: - - name: Get token - id: get-github-app-token - uses: actions/create-github-app-token@7bfa3a4717ef143a604ee0a99d859b8886a96d00 # v1 - with: - app-id: ${{ secrets.RENOVATE_GITHUB_APP_ID }} - private-key: ${{ secrets.RENOVATE_GITHUB_APP_PRIVATE_KEY }} - name: Checkout uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: true + # Don't waste time starting Renovate if JSON is invalid - name: Validate Renovate JSON run: npx --yes --package renovate -- renovate-config-validator + + - name: Setup Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Find the Go Build Cache + id: go + run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT + + - name: Cache the Go Build Cache + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + with: + path: ${{ steps.go.outputs.cache }} + key: ${{ runner.os }}-build-check-diff-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-build-check-diff- + + - name: Cache Go Dependencies + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + with: + path: .work/pkg + key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-pkg- + + - name: Get token + id: get-github-app-token + uses: actions/create-github-app-token@7bfa3a4717ef143a604ee0a99d859b8886a96d00 # v1 + with: + app-id: ${{ secrets.RENOVATE_GITHUB_APP_ID }} + private-key: ${{ secrets.RENOVATE_GITHUB_APP_PRIVATE_KEY }} + - name: Self-hosted Renovate uses: renovatebot/github-action@89bd050bafa5a15de5d9383e3129edf210422004 # v40.1.5 env: From df9aaaffbd9ba4d74825a777e6390120eca579b2 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Sat, 20 Apr 2024 12:06:23 +0100 Subject: [PATCH 118/370] chore(renovate): make generate working Signed-off-by: Philippe Scorsolini --- .github/renovate.json5 | 1 + .github/workflows/renovate.yml | 41 +++++----------------------------- 2 files changed, 6 insertions(+), 36 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 457c7909f..605faa2d6 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -135,6 +135,7 @@ // Post-upgrade tasks that are executed before a commit is made by Renovate. 
"commands": [ "git submodule update --init", + "install-tool golang $(grep -oP \"^toolchain go\\K.+\" go.mod)", "make generate" ], fileFilters: ["**/*"], diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index d399ff0f4..0d2778f09 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -4,27 +4,18 @@ on: workflow_dispatch: inputs: logLevel: - description: "Override default log level" - required: false + description: "Renovate's log level" + required: true default: "info" type: string - overrideSchedule: - description: "Override all schedules" - required: false - default: "false" - type: string schedule: - cron: '0 8 * * *' -env: - # Common versions - GO_VERSION: '1.22.0' - jobs: renovate: runs-on: ubuntu-latest if: | - !github.event.repository.fork && + !github.event.repository.fork && !github.event.pull_request.head.repo.fork steps: - name: Checkout @@ -36,29 +27,6 @@ jobs: - name: Validate Renovate JSON run: npx --yes --package renovate -- renovate-config-validator - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 - with: - go-version: ${{ env.GO_VERSION }} - - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - - - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 - with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-check-diff-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-check-diff- - - - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 - with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-pkg- - - name: Get token id: get-github-app-token uses: actions/create-github-app-token@7bfa3a4717ef143a604ee0a99d859b8886a96d00 # v1 @@ -72,7 +40,8 @@ jobs: RENOVATE_REPOSITORIES: ${{ github.repository }} # Use GitHub API to create commits RENOVATE_PLATFORM_COMMIT: "true" - RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^git submodule update --init$", "^make generate$"]' + LOG_LEVEL: ${{ github.event.inputs.logLevel }} + RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^git submodule update --init$", "^make generate$", "^install-tool golang \\$\\(grep -oP \"\\^toolchain go\\\\K\\.\\+\" go\\.mod\\)$"]' with: configurationFile: .github/renovate.json5 token: '${{ steps.get-github-app-token.outputs.token }}' From da7d4ecca89d883fb03c1f3525172c45b97b57b2 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sat, 20 Apr 2024 11:23:03 +0000 Subject: [PATCH 119/370] chore(deps): update actions/cache digest to 0c45773 --- .github/workflows/ci.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cd6e94833..8a1d4496e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -41,14 +41,14 @@ jobs: run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: ${{ steps.go.outputs.cache }} key: ${{ runner.os }}-build-check-diff-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-build-check-diff- - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: 
actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: .work/pkg key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} @@ -95,14 +95,14 @@ jobs: run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: ${{ steps.go.outputs.cache }} key: ${{ runner.os }}-build-lint-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-build-lint- - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: .work/pkg key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} @@ -141,14 +141,14 @@ jobs: run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: ${{ steps.go.outputs.cache }} key: ${{ runner.os }}-build-check-diff-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-build-check-diff- - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: .work/pkg key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} @@ -209,14 +209,14 @@ jobs: run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: ${{ steps.go.outputs.cache }} key: ${{ runner.os }}-build-unit-tests-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-build-unit-tests- - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: .work/pkg key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} @@ -277,14 +277,14 @@ jobs: run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: ${{ steps.go.outputs.cache }} key: ${{ runner.os }}-build-e2e-tests-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-build-e2e-tests- - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: .work/pkg key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} @@ -358,14 +358,14 @@ jobs: run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: ${{ steps.go.outputs.cache }} key: ${{ runner.os }}-build-publish-artifacts-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-build-publish-artifacts- - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: .work/pkg key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} From 5eb3a20af6e556ece5d63bab45dfe0c9e3401323 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" 
<166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sat, 20 Apr 2024 11:24:22 +0000 Subject: [PATCH 120/370] chore(deps): update renovatebot/github-action action to v40.1.10 --- .github/workflows/renovate.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 0d2778f09..5e144362e 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -35,7 +35,7 @@ jobs: private-key: ${{ secrets.RENOVATE_GITHUB_APP_PRIVATE_KEY }} - name: Self-hosted Renovate - uses: renovatebot/github-action@89bd050bafa5a15de5d9383e3129edf210422004 # v40.1.5 + uses: renovatebot/github-action@2e021d24483d81e77e0e902d0809adfbfff276fc # v40.1.10 env: RENOVATE_REPOSITORIES: ${{ github.repository }} # Use GitHub API to create commits From beaa418f732ed5ad055f4518ba6b77a9ecd7da72 Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Sat, 20 Apr 2024 19:58:04 +0200 Subject: [PATCH 121/370] governance: include steering committee contact info Signed-off-by: Jared Watts --- GOVERNANCE.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/GOVERNANCE.md b/GOVERNANCE.md index cffc7d458..9e1dd854d 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -72,6 +72,17 @@ Here are the members of the initial steering committee (listed in alphabetical o | | [Jared Watts](https://github.com/jbw976) | Upbound | jared@upbound.io | 2024-02-06 | 2026-02-06 | | | [Nic Cope](https://github.com/negz) | Upbound | negz@upbound.io | 2024-02-06 | 2026-02-06 | +### Contact Info + +The steering committee can be reached at the following locations: + +* [`#steering-committee`](https://crossplane.slack.com/archives/C032WMA459S) + channel on the [Crossplane Slack](https://slack.crossplane.io/) workspace +* [`steering@crossplane.io`](mailto:steering@crossplane.io) public email address + +Members of the community as well as the broader ecosystem are welcome to contact +the steering committee for any issues or concerns they can assist with. 
+ ### Election Process #### Eligibility for Voting From 7c87bdfc149637095e8d46c28640b21dd59a2557 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Sun, 21 Apr 2024 10:47:43 +0100 Subject: [PATCH 122/370] ci: renovate default log level info Signed-off-by: Philippe Scorsolini --- .github/workflows/renovate.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 5e144362e..93cf8bf53 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -11,6 +11,9 @@ on: schedule: - cron: '0 8 * * *' +env: + LOG_LEVEL: "info" + jobs: renovate: runs-on: ubuntu-latest @@ -40,7 +43,7 @@ jobs: RENOVATE_REPOSITORIES: ${{ github.repository }} # Use GitHub API to create commits RENOVATE_PLATFORM_COMMIT: "true" - LOG_LEVEL: ${{ github.event.inputs.logLevel }} + LOG_LEVEL: ${{ github.event.inputs.logLevel || env.LOG_LEVEL }} RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^git submodule update --init$", "^make generate$", "^install-tool golang \\$\\(grep -oP \"\\^toolchain go\\\\K\\.\\+\" go\\.mod\\)$"]' with: configurationFile: .github/renovate.json5 From 851ea584ea2a80bee30b2c0ae36649287482fada Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Sun, 21 Apr 2024 16:39:41 +0100 Subject: [PATCH 123/370] chore: golangci-lint switch from deprecated syntax Signed-off-by: Philippe Scorsolini --- .golangci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 3c13604f9..dba9ebb81 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,9 +1,6 @@ run: timeout: 10m - skip-files: - - "zz_generated\\..+\\.go$" - output: # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" format: colored-line-number @@ -222,7 +219,10 @@ linters-settings: json: goCamel issues: - # Excluding configuration per-path and per-linter + # Excluding generated files. + exclude-files: + - "zz_generated\\..+\\.go$" + # Excluding configuration per-path and per-linter. exclude-rules: # Exclude some linters from running on tests files. 
- path: _test(ing)?\.go From a4b7bbd6f9d00e5e9d100eb9c00272632bc2a43f Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Sun, 21 Apr 2024 16:41:06 +0100 Subject: [PATCH 124/370] chore: golangci-lint log errors to stderr to have output in renovate Signed-off-by: Philippe Scorsolini --- .golangci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index dba9ebb81..c62817d9a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,7 +3,9 @@ run: output: # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - format: colored-line-number + formats: + - format: colored-line-number + path: stderr linters: enable-all: true From 1f5e81104fbb570e0ad4b74a780e267f9db849e3 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Sun, 21 Apr 2024 16:43:05 +0100 Subject: [PATCH 125/370] chore: renovate generate and lint when needed Signed-off-by: Philippe Scorsolini --- .github/renovate.json5 | 42 ++++++++++++++++++++++++++-------- .github/workflows/renovate.yml | 2 +- 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 605faa2d6..682ae7c17 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -73,6 +73,38 @@ // Renovate evaluates all packageRules in order, so low priority rules should // be at the beginning, high priority at the end "packageRules": [ + { + "description": "Generate code after upgrading go dependencies", + "matchDatasources": ["go"], + postUpgradeTasks: { + // Post-upgrade tasks that are executed before a commit is made by Renovate. + "commands": [ + "git submodule update --init", + "install-tool golang $(grep -oP \"^toolchain go\\K.+\" go.mod)", + "make generate", + ], + fileFilters: [ + "**/*" + ], + executionMode: "update", + }, + }, + { + "description": "Lint code after upgrading golangci-lint", + "matchDepNames": ["golangci/golangci-lint"], + postUpgradeTasks: { + // Post-upgrade tasks that are executed before a commit is made by Renovate. + "commands": [ + "git submodule update --init", + "install-tool golang $(grep -oP \"^toolchain go\\K.+\" go.mod)", + "make go.lint", + ], + fileFilters: [ + "**/*" + ], + executionMode: "update", + }, + }, { "matchManagers": ["crossplane"], "matchFileNames": ["test/e2e/**"], @@ -131,14 +163,4 @@ "enabled": false } ], - postUpgradeTasks: { -// Post-upgrade tasks that are executed before a commit is made by Renovate. 
- "commands": [ - "git submodule update --init", - "install-tool golang $(grep -oP \"^toolchain go\\K.+\" go.mod)", - "make generate" - ], - fileFilters: ["**/*"], - executionMode: "update", - } } diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 93cf8bf53..e54b14bcf 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -44,7 +44,7 @@ jobs: # Use GitHub API to create commits RENOVATE_PLATFORM_COMMIT: "true" LOG_LEVEL: ${{ github.event.inputs.logLevel || env.LOG_LEVEL }} - RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^git submodule update --init$", "^make generate$", "^install-tool golang \\$\\(grep -oP \"\\^toolchain go\\\\K\\.\\+\" go\\.mod\\)$"]' + RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^git submodule update --init$", "^make generate$", "^install-tool golang \\$\\(grep -oP \"\\^toolchain go\\\\K\\.\\+\" go\\.mod\\)$", "^make go.lint$"]' with: configurationFile: .github/renovate.json5 token: '${{ steps.get-github-app-token.outputs.token }}' From cc29a0e009b2f69643e02636c3ed8ccb5dadce64 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sun, 21 Apr 2024 16:18:05 +0000 Subject: [PATCH 126/370] chore(deps): update dependency helm/helm to v3.14.4 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index bb1c3ad18..318d15cac 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ GO_LINT_ARGS ?= "--fix" # Setup Kubernetes tools USE_HELM3 = true -HELM3_VERSION = v3.14.0 +HELM3_VERSION = v3.14.4 KIND_VERSION = v0.21.0 -include build/makelib/k8s_tools.mk From 285159e3dae716ef6f3b34f05d4010bb97809278 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Sun, 21 Apr 2024 17:30:33 +0100 Subject: [PATCH 127/370] chore(renovate): group all go version updates Signed-off-by: Philippe Scorsolini --- .github/renovate.json5 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 682ae7c17..5ed453749 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -161,6 +161,10 @@ "google/oss-fuzz" ], "enabled": false + }, { + "description": "Group all go version updates", + "matchDatasources": ["golang-version"], + "groupName": "golang version", } ], } From 82386fe0716e25455059b31bb76ebfef41d3ad14 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Sun, 21 Apr 2024 16:59:51 +0100 Subject: [PATCH 128/370] chore(chart): set right empty list default value for imagePullSecrets Signed-off-by: Philippe Scorsolini --- cluster/charts/crossplane/README.md | 2 +- .../crossplane/templates/rbac-manager-serviceaccount.yaml | 4 ++-- cluster/charts/crossplane/templates/serviceaccount.yaml | 4 ++-- cluster/charts/crossplane/values.yaml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 63a11ff37..2fcd5e46d 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -82,7 +82,7 @@ and their default values. | `image.pullPolicy` | The image pull policy used for Crossplane and RBAC Manager pods. | `"IfNotPresent"` | | `image.repository` | Repository for the Crossplane pod image. | `"xpkg.upbound.io/crossplane/crossplane"` | | `image.tag` | The Crossplane image tag. Defaults to the value of `appVersion` in `Chart.yaml`. | `""` | -| `imagePullSecrets` | The imagePullSecret names to add to the Crossplane ServiceAccount. 
| `{}` | +| `imagePullSecrets` | The imagePullSecret names to add to the Crossplane ServiceAccount. | `[]` | | `leaderElection` | Enable [leader election](https://docs.crossplane.io/latest/concepts/pods/#leader-election) for the Crossplane pod. | `true` | | `metrics.enabled` | Enable Prometheus path, port and scrape annotations and expose port 8080 for both the Crossplane and RBAC Manager pods. | `false` | | `nodeSelector` | Add `nodeSelectors` to the Crossplane pod deployment. | `{}` | diff --git a/cluster/charts/crossplane/templates/rbac-manager-serviceaccount.yaml b/cluster/charts/crossplane/templates/rbac-manager-serviceaccount.yaml index ae00f94ad..fd1dcc977 100644 --- a/cluster/charts/crossplane/templates/rbac-manager-serviceaccount.yaml +++ b/cluster/charts/crossplane/templates/rbac-manager-serviceaccount.yaml @@ -7,9 +7,9 @@ metadata: labels: app: {{ template "crossplane.name" . }} {{- include "crossplane.labels" . | indent 4 }} -{{- if .Values.imagePullSecrets }} +{{- with .Values.imagePullSecrets }} imagePullSecrets: -{{- range $index, $secret := .Values.imagePullSecrets }} +{{- range $index, $secret := . }} - name: {{ $secret }} {{- end }} {{- end }} diff --git a/cluster/charts/crossplane/templates/serviceaccount.yaml b/cluster/charts/crossplane/templates/serviceaccount.yaml index 66d948c8e..fecec467a 100644 --- a/cluster/charts/crossplane/templates/serviceaccount.yaml +++ b/cluster/charts/crossplane/templates/serviceaccount.yaml @@ -9,9 +9,9 @@ metadata: {{- with .Values.serviceAccount.customAnnotations }} annotations: {{ toYaml . | nindent 4 }} {{- end }} -{{- if .Values.imagePullSecrets }} +{{- with .Values.imagePullSecrets }} imagePullSecrets: -{{- range $index, $secret := .Values.imagePullSecrets }} +{{- range $index, $secret := . }} - name: {{ $secret }} {{- end }} {{ end }} diff --git a/cluster/charts/crossplane/values.yaml b/cluster/charts/crossplane/values.yaml index 96e2f7976..4b88ccf60 100755 --- a/cluster/charts/crossplane/values.yaml +++ b/cluster/charts/crossplane/values.yaml @@ -56,7 +56,7 @@ function: packages: [] # -- The imagePullSecret names to add to the Crossplane ServiceAccount. -imagePullSecrets: {} +imagePullSecrets: [] registryCaBundleConfig: # -- The ConfigMap name containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates. 
From 3482a86c6bb73b617319a9583461cceb949f7f19 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 08:09:09 +0000 Subject: [PATCH 129/370] chore(deps): update docker/setup-buildx-action digest to d70bba7 --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3c28bf32c..1a2777e67 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -254,7 +254,7 @@ jobs: platforms: all - name: Setup Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3 + uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3 with: version: ${{ env.DOCKER_BUILDX_VERSION }} install: true @@ -335,7 +335,7 @@ jobs: platforms: all - name: Setup Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3 + uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3 with: version: ${{ env.DOCKER_BUILDX_VERSION }} install: true From b59c4339f8f8c6fc68d10d2f437ba341390948ca Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Fri, 5 Apr 2024 20:31:00 -0700 Subject: [PATCH 130/370] WIP: Clear XR field managers when upgrading to claim SSA Signed-off-by: Nic Cope --- .../apiextensions/claim/syncer_ssa.go | 80 +++++++++++++------ test/e2e/apiextensions_test.go | 5 +- 2 files changed, 61 insertions(+), 24 deletions(-) diff --git a/internal/controller/apiextensions/claim/syncer_ssa.go b/internal/controller/apiextensions/claim/syncer_ssa.go index 18d4f405f..5d165d4ea 100644 --- a/internal/controller/apiextensions/claim/syncer_ssa.go +++ b/internal/controller/apiextensions/claim/syncer_ssa.go @@ -21,8 +21,6 @@ import ( "fmt" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/util/csaupgrade" "sigs.k8s.io/controller-runtime/pkg/client" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -74,32 +72,68 @@ func NewPatchingManagedFieldsUpgrader(w client.Writer) *PatchingManagedFieldsUpg // Upgrade the supplied object's field managers from client-side to server-side // apply. -func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client.Object, ssaManager string, csaManagers ...string) error { - // UpgradeManagedFieldsPatch removes or replaces the specified CSA managers. - // Unfortunately most Crossplane controllers use CSA manager "crossplane". - // So we could for example fight with the XR controller: - // - // 1. We remove CSA manager "crossplane", triggering XR controller watch - // 2. XR controller uses CSA manager "crossplane", triggering our watch - // 3. Back to step 1 :) - // - // In practice we only need to upgrade once, to ensure we don't share fields - // that only this controller has ever applied with "crossplane". We assume - // that if our SSA manager already exists, we've done the upgrade. - for _, e := range obj.GetManagedFields() { +// +// This is a multi-step process. +// +// Step 1: All fields are owned by manager 'crossplane' operation 'Update'. This +// represents all fields set by the claim or XR controller up to this point. +// +// Step 2: Upgrade is called for the first time. We delete all field managers. +// +// Step 3: The claim controller server-side applies its fully specified intent +// as field manager 'apiextensions.crossplane.io/claim'. 
This becomes the +// manager of all the fields that are part of the claim controller's fully +// specified intent. All existing fields the claim controller didn't specify +// become owned by a special manager - 'before-first-apply', operation 'Update'. +// +// Step 4: Upgrade is called for the second time. It deletes the +// 'before-first-apply' field manager entry. Only the claim field manager +// remains. +// +// Step 5: Eventually the XR reconciler updates a field (e.g. spec.resourceRefs) +// and becomes owner of that field. +func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client.Object, ssaManager string, _ ...string) error { + // The XR doesn't exist, nothing to upgrade. + if !meta.WasCreated(obj) { + return nil + } + + foundSSA := false + foundBFA := false + idxBFA := -1 + + for i, e := range obj.GetManagedFields() { if e.Manager == ssaManager { - return nil + foundSSA = true + } + if e.Manager == "before-first-apply" { + foundBFA = true + idxBFA = i } } - p, err := csaupgrade.UpgradeManagedFieldsPatch(obj, sets.New[string](csaManagers...), ssaManager) - if err != nil { - return errors.Wrap(err, errCreatePatch) - } - if p == nil { - // No patch means there's nothing to upgrade. + + switch { + // If our SSA field manager exists and the before-first-apply field manager + // doesn't, we've already done the upgrade. Don't do it again. + case foundSSA && !foundBFA: return nil + + // We found our SSA field manager but also before-first-apply. It should now + // be safe to delete before-first-apply. + case foundSSA && foundBFA: + p := []byte(fmt.Sprintf(`[{"op": "remove", "path": "/metadata/managedFields/%d"}]`, idxBFA)) + return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), "cannot remove before-first-apply from field managers") + + // We didn't find our SSA field manager or the before-first-apply field + // manager. This means we haven't started the upgrade. The first thing we + // want to do is clear all managed fields. After we do this we'll let our + // SSA field manager apply the fields it cares about. The result will be + // that our SSA field manager shares ownership with a new manager named + // 'before-first-apply'. + default: + p := []byte(`[{"op": "replace", "path": "/metadata/managedFields", "value": [{}]}]`) + return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), "cannot clear field managers") } - return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), errPatchFieldManagers) } // A ServerSideCompositeSyncer binds and syncs a claim with a composite resource diff --git a/test/e2e/apiextensions_test.go b/test/e2e/apiextensions_test.go index 667700ca0..7906d8704 100644 --- a/test/e2e/apiextensions_test.go +++ b/test/e2e/apiextensions_test.go @@ -342,7 +342,10 @@ func TestPropagateFieldsRemovalToXRAfterUpgrade(t *testing.T) { funcs.AsFeaturesFunc(environment.HelmUpgradeCrossplaneToSuite(SuiteSSAClaims)), funcs.ReadyToTestWithin(1*time.Minute, namespace), )). - Assess("UpdateClaim", funcs.ApplyClaim(FieldManager, manifests, "claim-update.yaml")). + Assess("UpdateClaim", funcs.AllOf( + funcs.ApplyClaim(FieldManager, manifests, "claim-update.yaml"), + funcs.ClaimUnderTestMustNotChangeWithin(1*time.Minute), + )). Assess("FieldsRemovalPropagatedToXR", funcs.AllOf( // Updates and deletes are propagated claim -> XR. 
funcs.CompositeResourceHasFieldValueWithin(1*time.Minute, manifests, "claim.yaml", "metadata.labels[foo]", "1"), From c62c9a1ca6c22c617c1d4b054ea4b6976082d93c Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 22 Apr 2024 15:14:19 -0700 Subject: [PATCH 131/370] Update comment to accurately reflect XR field managers Signed-off-by: Nic Cope --- internal/controller/apiextensions/claim/syncer_ssa.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/internal/controller/apiextensions/claim/syncer_ssa.go b/internal/controller/apiextensions/claim/syncer_ssa.go index 5d165d4ea..85fc49ef3 100644 --- a/internal/controller/apiextensions/claim/syncer_ssa.go +++ b/internal/controller/apiextensions/claim/syncer_ssa.go @@ -75,8 +75,10 @@ func NewPatchingManagedFieldsUpgrader(w client.Writer) *PatchingManagedFieldsUpg // // This is a multi-step process. // -// Step 1: All fields are owned by manager 'crossplane' operation 'Update'. This -// represents all fields set by the claim or XR controller up to this point. +// Step 1: All fields are owned by either manager 'crossplane', operation +// 'Update' or manager 'apiextensions.crossplane.io/composite', operation +// 'Apply'. This represents all fields set by the claim or XR controller up to +// this point. // // Step 2: Upgrade is called for the first time. We delete all field managers. // From 1e58a2dbffa1a7f30bafcac2f270e38cfb6b8a6e Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 22 Apr 2024 15:20:35 -0700 Subject: [PATCH 132/370] Replace resourceVersion in SSA upgrade patches https://github.com/kubernetes/client-go/blob/v0.30.0/util/csaupgrade/upgrade.go#L165 This appears to be necessary to ensure the patch is rejected if we've derived our patch from a stale version of the object. Signed-off-by: Nic Cope --- internal/controller/apiextensions/claim/syncer_ssa.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/internal/controller/apiextensions/claim/syncer_ssa.go b/internal/controller/apiextensions/claim/syncer_ssa.go index 85fc49ef3..25878c0af 100644 --- a/internal/controller/apiextensions/claim/syncer_ssa.go +++ b/internal/controller/apiextensions/claim/syncer_ssa.go @@ -123,7 +123,10 @@ func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client. // We found our SSA field manager but also before-first-apply. It should now // be safe to delete before-first-apply. case foundSSA && foundBFA: - p := []byte(fmt.Sprintf(`[{"op": "remove", "path": "/metadata/managedFields/%d"}]`, idxBFA)) + p := []byte(fmt.Sprintf(`[ + {"op":"remove","path":"/metadata/managedFields/%d"}, + {"op":"replace","path":"/metadata/resourceVersion","value":"%s"} + ]`, idxBFA, obj.GetResourceVersion())) return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), "cannot remove before-first-apply from field managers") // We didn't find our SSA field manager or the before-first-apply field @@ -133,7 +136,10 @@ func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client. // that our SSA field manager shares ownership with a new manager named // 'before-first-apply'. 
default: - p := []byte(`[{"op": "replace", "path": "/metadata/managedFields", "value": [{}]}]`) + p := []byte(fmt.Sprintf(`[ + {"op":"replace","path": "/metadata/managedFields","value": [{}]}, + {"op":"replace","path":"/metadata/resourceVersion","value":"%s"} + ]`, obj.GetResourceVersion())) return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), "cannot clear field managers") } } From a93071ec7fc2c28042311c82a742e865d2ee07a1 Mon Sep 17 00:00:00 2001 From: findmyhappy Date: Wed, 24 Apr 2024 14:39:22 +0800 Subject: [PATCH 133/370] chore: fix struct name in comment Signed-off-by: findmyhappy --- internal/xpkg/reader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/xpkg/reader.go b/internal/xpkg/reader.go index f9d8c01b7..afb966002 100644 --- a/internal/xpkg/reader.go +++ b/internal/xpkg/reader.go @@ -89,7 +89,7 @@ func (t *teeReadCloser) Close() error { var _ io.ReadCloser = &joinedReadCloser{} -// joinedReadCloster joins a reader and a closer. It is typically used in the +// joinedReadCloser joins a reader and a closer. It is typically used in the // context of a ReadCloser being wrapped by a Reader. type joinedReadCloser struct { r io.Reader From 920687221994bc245362ce7b52d29bf84158154b Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Wed, 24 Apr 2024 11:17:12 +0200 Subject: [PATCH 134/370] Add SpareBank 1 Utvikling to adopters Signed-off-by: Jared Watts --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 701f32897..984649999 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -83,3 +83,4 @@ This list is sorted in the order that organizations were added to it. | [Xata](https://xata.io) | [@mattfield](https://github.com/mattfield) [@paulaguijarro](https://github.com/paulaguijarro) | Crossplane manages the dev, staging, and production RDS Aurora PostgreSQL clusters for our [Dedicated Clusters](https://xata.io/blog/postgres-dedicated-clusters) offering, along with Flux Kustomizations and other resources that provision cells of internal [Xata](https://xata.io) services. | | [AlphaSense](https://www.alpha-sense.com/) | @abhihendre | Engineering teams at [AlphaSense](https://www.alpha-sense.com/) leverage Crossplane APIs, abstracted by a set of Helm charts and Compositions curated by our Platform Teams, to seamlessly provision cloud services across three major clouds, including our production environment.| | [UiPath](https://www.uipath.com/) | [@mjnovice](https://github.com/mjnovice) | Control plane for infrastructure management which powers [AutomationSuite](https://docs.uipath.com/automation-suite/automation-suite/2023.10/installation-guide-eks-aks/automation-suite-on-eksaks-overview) | +| [SpareBank 1 Utvikling](https://sparebank1.dev/) | [@chlunde](https://github.com/chlunde) | Crossplane powers our Internal Developer Platform. It is utilized for day-to-day operations via GitOps and enabled us to execute a large-scale self-service migration of over a thousand production microservices, databases and caches from on-premises to EKS and managed AWS services. 
| From a07bce189fd39958ea1b4259238e0aa66394ccfd Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Wed, 24 Apr 2024 10:19:26 +0100 Subject: [PATCH 135/370] chore: remove dependency on bufbuild/buf Signed-off-by: Philippe Scorsolini --- .../fn/proto/v1beta1/run_function.pb.go | 2 +- apis/generate.go | 3 +- ...plane.io_compositeresourcedefinitions.yaml | 1 - ...ns.crossplane.io_compositionrevisions.yaml | 1 - ...extensions.crossplane.io_compositions.yaml | 1 - ...ions.crossplane.io_environmentconfigs.yaml | 1 - .../apiextensions.crossplane.io_usages.yaml | 1 - ....crossplane.io_configurationrevisions.yaml | 1 - .../pkg.crossplane.io_configurations.yaml | 1 - .../pkg.crossplane.io_controllerconfigs.yaml | 1 - ...rossplane.io_deploymentruntimeconfigs.yaml | 1 - .../pkg.crossplane.io_functionrevisions.yaml | 1 - cluster/crds/pkg.crossplane.io_functions.yaml | 1 - cluster/crds/pkg.crossplane.io_locks.yaml | 1 - .../pkg.crossplane.io_providerrevisions.yaml | 1 - cluster/crds/pkg.crossplane.io_providers.yaml | 1 - .../secrets.crossplane.io_storeconfigs.yaml | 1 - go.mod | 18 --------- go.sum | 39 ------------------- 19 files changed, 2 insertions(+), 75 deletions(-) diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go index d68823ae6..ee4b7352a 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: apiextensions/fn/proto/v1beta1/run_function.proto diff --git a/apis/generate.go b/apis/generate.go index 94ebe4468..7ca29c9b5 100644 --- a/apis/generate.go +++ b/apis/generate.go @@ -70,12 +70,11 @@ limitations under the License. // (or protoc) to invoke them. 
//go:generate go install google.golang.org/protobuf/cmd/protoc-gen-go google.golang.org/grpc/cmd/protoc-gen-go-grpc -//go:generate go run github.com/bufbuild/buf/cmd/buf generate +//go:generate go run github.com/bufbuild/buf/cmd/buf@v1.31.0 generate package apis import ( - _ "github.com/bufbuild/buf/cmd/buf" //nolint:typecheck _ "github.com/jmattheis/goverter/cmd/goverter" //nolint:typecheck _ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" //nolint:typecheck _ "google.golang.org/protobuf/cmd/protoc-gen-go" //nolint:typecheck diff --git a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml index 21564a4a3..8bb056fd0 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index d400d40e2..b2996800d 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index 30ca8f6b9..0a0b8cdff 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml index 03db70ad1..1d80d90b2 100644 --- a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml +++ b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_usages.yaml b/cluster/crds/apiextensions.crossplane.io_usages.yaml index 67d75b3f1..b4a4cc072 100644 --- a/cluster/crds/apiextensions.crossplane.io_usages.yaml +++ b/cluster/crds/apiextensions.crossplane.io_usages.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml index 06c288c87..ed34a2b54 100644 --- a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_configurations.yaml b/cluster/crds/pkg.crossplane.io_configurations.yaml index feb414313..6bc35a71e 100644 --- a/cluster/crds/pkg.crossplane.io_configurations.yaml +++ b/cluster/crds/pkg.crossplane.io_configurations.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml index 63c87485d..3cee9ece8 100644 --- 
a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml index e01462654..7b80a697f 100644 --- a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml index 8a1d42d73..4695ed512 100644 --- a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_functions.yaml b/cluster/crds/pkg.crossplane.io_functions.yaml index 94bc1ff17..cc895736c 100644 --- a/cluster/crds/pkg.crossplane.io_functions.yaml +++ b/cluster/crds/pkg.crossplane.io_functions.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_locks.yaml b/cluster/crds/pkg.crossplane.io_locks.yaml index b55f731bf..4daff56a3 100644 --- a/cluster/crds/pkg.crossplane.io_locks.yaml +++ b/cluster/crds/pkg.crossplane.io_locks.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml index 22a5f8397..92ff14575 100644 --- a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_providers.yaml b/cluster/crds/pkg.crossplane.io_providers.yaml index 8b394dc44..d18030821 100644 --- a/cluster/crds/pkg.crossplane.io_providers.yaml +++ b/cluster/crds/pkg.crossplane.io_providers.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml index 85ffc16a6..989d5fcb4 100644 --- a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml +++ b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/go.mod b/go.mod index 17ea437a8..4f4d294f5 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,6 @@ require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 github.com/Masterminds/semver v1.5.0 github.com/alecthomas/kong v0.8.1 - github.com/bufbuild/buf v1.27.2 github.com/crossplane/crossplane-runtime v1.16.0-rc.1.0.20240226223305-2c81cc6326e5 github.com/docker/docker v25.0.5+incompatible github.com/docker/go-connections v0.5.0 @@ -47,8 +46,6 @@ require ( ) require ( - connectrpc.com/connect v1.13.0 // indirect - connectrpc.com/otelconnect v0.6.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 
// indirect @@ -71,7 +68,6 @@ require ( github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect - github.com/jdx/go-netrc v1.0.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect @@ -124,11 +120,9 @@ require ( github.com/aws/smithy-go v1.19.0 github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bufbuild/protocompile v0.6.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dave/jennifer v1.6.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect @@ -140,9 +134,7 @@ require ( github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.8.0 // indirect github.com/fatih/color v1.16.0 // indirect - github.com/felixge/fgprof v0.9.3 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-chi/chi/v5 v5.0.11 // indirect github.com/go-logr/logr v1.4.1 github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect @@ -150,13 +142,11 @@ require ( github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gobuffalo/flect v1.0.2 // indirect - github.com/gofrs/uuid/v5 v5.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 // indirect github.com/google/uuid v1.6.0 github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -164,7 +154,6 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.4 // indirect - github.com/klauspost/pgzip v1.2.6 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -173,28 +162,21 @@ require ( github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc5 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/profile v1.7.0 // indirect github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/rs/cors v1.10.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // 
indirect github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tetratelabs/wazero v1.6.0 // indirect github.com/vbatts/tar-split v0.11.5 // indirect github.com/vladimirvivien/gexe v0.2.0 // indirect go.opentelemetry.io/otel v1.19.0 // indirect go.opentelemetry.io/otel/metric v1.19.0 // indirect - go.opentelemetry.io/otel/sdk v1.19.0 // indirect go.opentelemetry.io/otel/trace v1.19.0 // indirect - go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/crypto v0.21.0 // indirect diff --git a/go.sum b/go.sum index d21a4afd2..492095aaa 100644 --- a/go.sum +++ b/go.sum @@ -3,10 +3,6 @@ cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiV cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -connectrpc.com/connect v1.13.0 h1:lGs5maZZzWOOD+PFFiOt5OncKmMsk9ZdPwpy5jcmaYg= -connectrpc.com/connect v1.13.0/go.mod h1:uHAFHtYgeSZJxXrkN1IunDpKghnTXhYbVh0wW4StPW0= -connectrpc.com/otelconnect v0.6.0 h1:VJAdQL9+sgdUw9+7+J+jq8pQo/h1S7tSFv2+vDcR7bU= -connectrpc.com/otelconnect v0.6.0/go.mod h1:jdcs0uiwXQVmSMgTJ2dAaWR5VbpNd7QKNkuoH7n86RA= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= @@ -99,10 +95,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bufbuild/buf v1.27.2 h1:uX2kvZfPfRoOsrxUW4LwpykSyH+wI5dUnIG0QWHDCCU= -github.com/bufbuild/buf v1.27.2/go.mod h1:7RImDhFDqhEsdK5wbuMhoVSlnrMggGGcd3s9WozvHtM= -github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= -github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= @@ -129,7 +121,6 @@ github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03V github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= @@ -187,16 +178,12 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwC github.com/exponent-io/jsonpath 
v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= -github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= -github.com/go-chi/chi/v5 v5.0.11 h1:BnpYbFZ3T3S1WMpD79r7R5ThWX40TaFB7L31Y8xqSwA= -github.com/go-chi/chi/v5 v5.0.11/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -226,10 +213,6 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M= -github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= @@ -281,7 +264,6 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 h1:WzfWbQz/Ze8v6l++GGbGNFZnUShVpP/0xffCPLL+ax8= github.com/google/pprof v0.0.0-20240117000934-35fc243c5815/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= @@ -301,7 +283,6 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rH github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod 
h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= -github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -309,11 +290,7 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jdx/go-netrc v1.0.0 h1:QbLMLyCZGj0NA8glAhxUpf1zDg6cxnWgMBbjq40W0gQ= -github.com/jdx/go-netrc v1.0.0/go.mod h1:Gh9eFQJnoTNIRHXl2j5bJXA1u84hQWJWgGh569zF3v8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= -github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= github.com/jmattheis/goverter v1.3.2 h1:KBuJkqYtZAMsK6QG11+3RdxXZJWwULl+r0M6RWlXU4s= github.com/jmattheis/goverter v1.3.2/go.mod h1:Il/E+0riIfIgRBUpM+Fnh2s8/sJhMp5NeDZZenTd6S4= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -330,8 +307,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= -github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -392,13 +367,9 @@ github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+v github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= -github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= @@ -412,8 +383,6 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= @@ -447,8 +416,6 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/tetratelabs/wazero v1.6.0 h1:z0H1iikCdP8t+q341xqepY4EWvHEw8Es7tlqiVzlP3g= -github.com/tetratelabs/wazero v1.6.0/go.mod h1:0U0G41+ochRKoPKCJlh0jMg1CHkyfK8kDqiirMmKY8A= github.com/upbound/up-sdk-go v0.1.1-0.20240122203953-2d00664aab8e h1:aNzUuv4ZKH2OT3Qv6dpZxkMPDOfl/6MoS79T/zUzako= github.com/upbound/up-sdk-go v0.1.1-0.20240122203953-2d00664aab8e/go.mod h1:IDIbYDb9fbedtxCc2CrdGcVRol6la7z2gkKh0VYWVGk= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= @@ -485,16 +452,12 @@ go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPi go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= -go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -570,14 +533,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 8fa505eccfd7c7684fd365895bb11100b3c22d69 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 25 Apr 2024 08:09:17 +0000 Subject: [PATCH 136/370] chore(deps): update golang version to v1.22.2 --- .github/workflows/ci.yml | 2 +- .github/workflows/promote.yml | 2 +- go.mod | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1a2777e67..c106c0c0a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,7 +10,7 @@ on: env: # Common versions - GO_VERSION: '1.22.0' + GO_VERSION: '1.22.2' GOLANGCI_VERSION: 'v1.56.2' DOCKER_BUILDX_VERSION: 'v0.10.0' diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index 5b14a2cf2..34ba83962 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -21,7 +21,7 @@ on: env: # Common versions - GO_VERSION: '1.22.0' + GO_VERSION: '1.22.2' # Common users. We can't run a step 'if secrets.AWS_USR != ""' but we can run # a step 'if env.AWS_USR' != ""', so we copy these to succinctly test whether diff --git a/go.mod b/go.mod index 3ca664fff..b57c72cb3 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/crossplane/crossplane go 1.21 -toolchain go1.21.6 +toolchain go1.22.2 require ( dario.cat/mergo v1.0.0 From 2f611986d42569be2b8778966a9dfa66f6392fc1 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sun, 21 Apr 2024 16:21:02 +0000 Subject: [PATCH 137/370] chore(deps): update dependency golangci/golangci-lint to v1.57.2 --- .github/workflows/ci.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c106c0c0a..c7c05fa56 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,7 +11,7 @@ on: env: # Common versions GO_VERSION: '1.22.2' - GOLANGCI_VERSION: 'v1.56.2' + GOLANGCI_VERSION: 'v1.57.2' DOCKER_BUILDX_VERSION: 'v0.10.0' # Common users. 
We can't run a step 'if secrets.AWS_USR != ""' but we can run diff --git a/Makefile b/Makefile index 318d15cac..f852c8a92 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ GO_TEST_PACKAGES = $(GO_PROJECT)/test/e2e GO_LDFLAGS += -X $(GO_PROJECT)/internal/version.version=$(VERSION) GO_SUBDIRS += cmd internal apis GO111MODULE = on -GOLANGCILINT_VERSION = 1.56.2 +GOLANGCILINT_VERSION = 1.57.2 GO_LINT_ARGS ?= "--fix" -include build/makelib/golang.mk From a438f5678962a7ac82c91d4c71606782814dd314 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Thu, 25 Apr 2024 10:52:50 +0100 Subject: [PATCH 138/370] chore: ignore contextcheck linter on deletion replay Signed-off-by: Philippe Scorsolini --- internal/controller/apiextensions/usage/reconciler.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/controller/apiextensions/usage/reconciler.go b/internal/controller/apiextensions/usage/reconciler.go index 0ef151edd..8ed3fc670 100644 --- a/internal/controller/apiextensions/usage/reconciler.go +++ b/internal/controller/apiextensions/usage/reconciler.go @@ -320,6 +320,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco if u.Spec.ReplayDeletion != nil && *u.Spec.ReplayDeletion && used.GetAnnotations() != nil { if policy, ok := used.GetAnnotations()[usage.AnnotationKeyDeletionAttempt]; ok { // We have already recorded a deletion attempt and want to replay deletion, let's delete the used resource. + //nolint:contextcheck // See comment on Delete below. go func() { // We do the deletion async and after some delay to make sure the usage is deleted before the // deletion attempt. We remove the finalizer on this Usage right below, so, we know it will disappear From 3dd314272bf12d58a73165f1413f09d0cf9272e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20G=C3=B3mez?= Date: Thu, 25 Apr 2024 16:31:58 +0100 Subject: [PATCH 139/370] Add Veset to adopters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Pablo Gómez --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 984649999..fe26e699b 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -84,3 +84,4 @@ This list is sorted in the order that organizations were added to it. | [AlphaSense](https://www.alpha-sense.com/) | @abhihendre | Engineering teams at [AlphaSense](https://www.alpha-sense.com/) leverage Crossplane APIs, abstracted by a set of Helm charts and Compositions curated by our Platform Teams, to seamlessly provision cloud services across three major clouds, including our production environment.| | [UiPath](https://www.uipath.com/) | [@mjnovice](https://github.com/mjnovice) | Control plane for infrastructure management which powers [AutomationSuite](https://docs.uipath.com/automation-suite/automation-suite/2023.10/installation-guide-eks-aks/automation-suite-on-eksaks-overview) | | [SpareBank 1 Utvikling](https://sparebank1.dev/) | [@chlunde](https://github.com/chlunde) | Crossplane powers our Internal Developer Platform. It is utilized for day-to-day operations via GitOps and enabled us to execute a large-scale self-service migration of over a thousand production microservices, databases and caches from on-premises to EKS and managed AWS services. | +| [Veset](https://veset.tv/) | [@pblgomez](https://github.com/pblgomez) | At Veset we are deploying all our backend resources to be managed by Crossplane. 
| From 8eb3bb102a71cab0780ca068921702f732a68b4b Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Thu, 25 Apr 2024 17:47:43 +0200 Subject: [PATCH 140/370] clarify production usage for Veset in ADOPTERS.md Signed-off-by: Jared Watts --- ADOPTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index fe26e699b..cf982aa11 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -84,4 +84,4 @@ This list is sorted in the order that organizations were added to it. | [AlphaSense](https://www.alpha-sense.com/) | @abhihendre | Engineering teams at [AlphaSense](https://www.alpha-sense.com/) leverage Crossplane APIs, abstracted by a set of Helm charts and Compositions curated by our Platform Teams, to seamlessly provision cloud services across three major clouds, including our production environment.| | [UiPath](https://www.uipath.com/) | [@mjnovice](https://github.com/mjnovice) | Control plane for infrastructure management which powers [AutomationSuite](https://docs.uipath.com/automation-suite/automation-suite/2023.10/installation-guide-eks-aks/automation-suite-on-eksaks-overview) | | [SpareBank 1 Utvikling](https://sparebank1.dev/) | [@chlunde](https://github.com/chlunde) | Crossplane powers our Internal Developer Platform. It is utilized for day-to-day operations via GitOps and enabled us to execute a large-scale self-service migration of over a thousand production microservices, databases and caches from on-premises to EKS and managed AWS services. | -| [Veset](https://veset.tv/) | [@pblgomez](https://github.com/pblgomez) | At Veset we are deploying all our backend resources to be managed by Crossplane. | +| [Veset](https://veset.tv/) | [@pblgomez](https://github.com/pblgomez) | At Veset we are deploying all our backend resources in production environments to be managed by Crossplane. | From b5f7c78d23d0dc108919c10a329739e3a927d8f6 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 3 Apr 2024 04:29:01 -0700 Subject: [PATCH 141/370] Support loading composition function credentials from secrets In future we might support other sources, but for now I think secrets will be a good start. I've designed both the Kubernetes and protobuf APIs to support adding other credential sources / shapes in future. 
Signed-off-by: Nic Cope --- .../fn/proto/v1beta1/run_function.pb.go | 657 +++++++++++------- .../fn/proto/v1beta1/run_function.proto | 18 + apis/apiextensions/v1/composition_common.go | 30 + .../v1/zz_generated.conversion.go | 21 + .../apiextensions/v1/zz_generated.deepcopy.go | 25 + .../zz_generated.composition_common.go | 30 + .../v1beta1/zz_generated.deepcopy.go | 37 +- ...ns.crossplane.io_compositionrevisions.yaml | 56 ++ ...extensions.crossplane.io_compositions.yaml | 28 + .../composite/composition_functions.go | 16 + .../composite/composition_functions_test.go | 68 +- .../functions/setup/composition.yaml | 5 + .../composition/functions/setup/secret.yaml | 8 + 13 files changed, 756 insertions(+), 243 deletions(-) mode change 100755 => 100644 apis/apiextensions/v1/zz_generated.conversion.go create mode 100644 test/e2e/manifests/apiextensions/composition/functions/setup/secret.yaml diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go index d68823ae6..d2ef33ef0 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go @@ -188,6 +188,9 @@ type RunFunctionRequest struct { // did not exist, Crossplane sets the map key to an empty Resources message to // indicate that it attempted to satisfy the request. ExtraResources map[string]*Resources `protobuf:"bytes,6,rep,name=extra_resources,json=extraResources,proto3" json:"extra_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional credentials that this Function may use to communicate with an + // external system. + Credentials *Credentials `protobuf:"bytes,7,opt,name=credentials,proto3,oneof" json:"credentials,omitempty"` } func (x *RunFunctionRequest) Reset() { @@ -264,6 +267,132 @@ func (x *RunFunctionRequest) GetExtraResources() map[string]*Resources { return nil } +func (x *RunFunctionRequest) GetCredentials() *Credentials { + if x != nil { + return x.Credentials + } + return nil +} + +// Credentials that a Function may use to communicate with an external system. +type Credentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Source of the credentials. + // + // Types that are assignable to Source: + // + // *Credentials_CredentialData + Source isCredentials_Source `protobuf_oneof:"source"` +} + +func (x *Credentials) Reset() { + *x = Credentials{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Credentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Credentials) ProtoMessage() {} + +func (x *Credentials) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Credentials.ProtoReflect.Descriptor instead. 
+func (*Credentials) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{1} +} + +func (m *Credentials) GetSource() isCredentials_Source { + if m != nil { + return m.Source + } + return nil +} + +func (x *Credentials) GetCredentialData() *CredentialData { + if x, ok := x.GetSource().(*Credentials_CredentialData); ok { + return x.CredentialData + } + return nil +} + +type isCredentials_Source interface { + isCredentials_Source() +} + +type Credentials_CredentialData struct { + // Credential data loaded by Crossplane, for example from a Secret. + CredentialData *CredentialData `protobuf:"bytes,1,opt,name=credential_data,json=credentialData,proto3,oneof"` +} + +func (*Credentials_CredentialData) isCredentials_Source() {} + +// CredentialData loaded by Crossplane, for example from a Secret. +type CredentialData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data map[string][]byte `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CredentialData) Reset() { + *x = CredentialData{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CredentialData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialData) ProtoMessage() {} + +func (x *CredentialData) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CredentialData.ProtoReflect.Descriptor instead. +func (*CredentialData) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{2} +} + +func (x *CredentialData) GetData() map[string][]byte { + if x != nil { + return x.Data + } + return nil +} + // Resources represents the state of several Crossplane resources. type Resources struct { state protoimpl.MessageState @@ -276,7 +405,7 @@ type Resources struct { func (x *Resources) Reset() { *x = Resources{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -289,7 +418,7 @@ func (x *Resources) String() string { func (*Resources) ProtoMessage() {} func (x *Resources) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -302,7 +431,7 @@ func (x *Resources) ProtoReflect() protoreflect.Message { // Deprecated: Use Resources.ProtoReflect.Descriptor instead. 
func (*Resources) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{1} + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{3} } func (x *Resources) GetItems() []*Resource { @@ -344,7 +473,7 @@ type RunFunctionResponse struct { func (x *RunFunctionResponse) Reset() { *x = RunFunctionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -357,7 +486,7 @@ func (x *RunFunctionResponse) String() string { func (*RunFunctionResponse) ProtoMessage() {} func (x *RunFunctionResponse) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -370,7 +499,7 @@ func (x *RunFunctionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RunFunctionResponse.ProtoReflect.Descriptor instead. func (*RunFunctionResponse) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{2} + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{4} } func (x *RunFunctionResponse) GetMeta() *ResponseMeta { @@ -422,7 +551,7 @@ type RequestMeta struct { func (x *RequestMeta) Reset() { *x = RequestMeta{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -435,7 +564,7 @@ func (x *RequestMeta) String() string { func (*RequestMeta) ProtoMessage() {} func (x *RequestMeta) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -448,7 +577,7 @@ func (x *RequestMeta) ProtoReflect() protoreflect.Message { // Deprecated: Use RequestMeta.ProtoReflect.Descriptor instead. 
func (*RequestMeta) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{3} + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{5} } func (x *RequestMeta) GetTag() string { @@ -472,7 +601,7 @@ type Requirements struct { func (x *Requirements) Reset() { *x = Requirements{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -485,7 +614,7 @@ func (x *Requirements) String() string { func (*Requirements) ProtoMessage() {} func (x *Requirements) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -498,7 +627,7 @@ func (x *Requirements) ProtoReflect() protoreflect.Message { // Deprecated: Use Requirements.ProtoReflect.Descriptor instead. func (*Requirements) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{4} + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{6} } func (x *Requirements) GetExtraResources() map[string]*ResourceSelector { @@ -526,7 +655,7 @@ type ResourceSelector struct { func (x *ResourceSelector) Reset() { *x = ResourceSelector{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -539,7 +668,7 @@ func (x *ResourceSelector) String() string { func (*ResourceSelector) ProtoMessage() {} func (x *ResourceSelector) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -552,7 +681,7 @@ func (x *ResourceSelector) ProtoReflect() protoreflect.Message { // Deprecated: Use ResourceSelector.ProtoReflect.Descriptor instead. 
func (*ResourceSelector) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{5} + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{7} } func (x *ResourceSelector) GetApiVersion() string { @@ -618,7 +747,7 @@ type MatchLabels struct { func (x *MatchLabels) Reset() { *x = MatchLabels{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -631,7 +760,7 @@ func (x *MatchLabels) String() string { func (*MatchLabels) ProtoMessage() {} func (x *MatchLabels) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -644,7 +773,7 @@ func (x *MatchLabels) ProtoReflect() protoreflect.Message { // Deprecated: Use MatchLabels.ProtoReflect.Descriptor instead. func (*MatchLabels) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{6} + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{8} } func (x *MatchLabels) GetLabels() map[string]string { @@ -672,7 +801,7 @@ type ResponseMeta struct { func (x *ResponseMeta) Reset() { *x = ResponseMeta{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -685,7 +814,7 @@ func (x *ResponseMeta) String() string { func (*ResponseMeta) ProtoMessage() {} func (x *ResponseMeta) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -698,7 +827,7 @@ func (x *ResponseMeta) ProtoReflect() protoreflect.Message { // Deprecated: Use ResponseMeta.ProtoReflect.Descriptor instead. 
func (*ResponseMeta) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{7} + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{9} } func (x *ResponseMeta) GetTag() string { @@ -730,7 +859,7 @@ type State struct { func (x *State) Reset() { *x = State{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[8] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -743,7 +872,7 @@ func (x *State) String() string { func (*State) ProtoMessage() {} func (x *State) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[8] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -756,7 +885,7 @@ func (x *State) ProtoReflect() protoreflect.Message { // Deprecated: Use State.ProtoReflect.Descriptor instead. func (*State) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{8} + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{10} } func (x *State) GetComposite() *Resource { @@ -822,7 +951,7 @@ type Resource struct { func (x *Resource) Reset() { *x = Resource{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -835,7 +964,7 @@ func (x *Resource) String() string { func (*Resource) ProtoMessage() {} func (x *Resource) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -848,7 +977,7 @@ func (x *Resource) ProtoReflect() protoreflect.Message { // Deprecated: Use Resource.ProtoReflect.Descriptor instead. func (*Resource) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{9} + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{11} } func (x *Resource) GetResource() *structpb.Struct { @@ -887,7 +1016,7 @@ type Result struct { func (x *Result) Reset() { *x = Result{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[10] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -900,7 +1029,7 @@ func (x *Result) String() string { func (*Result) ProtoMessage() {} func (x *Result) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[10] + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -913,7 +1042,7 @@ func (x *Result) ProtoReflect() protoreflect.Message { // Deprecated: Use Result.ProtoReflect.Descriptor instead. 
func (*Result) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{10} + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{12} } func (x *Result) GetSeverity() Severity { @@ -942,7 +1071,7 @@ var file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = []byte{ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xba, 0x04, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6f, 0x22, 0x9e, 0x05, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, @@ -969,156 +1098,179 @@ var file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = []byte{ 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x72, - 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x6c, 0x0a, 0x13, 0x45, 0x78, - 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, - 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x4b, - 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x05, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xf0, 0x02, 0x0a, 0x13, - 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, - 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, - 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 
0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0b, 0x63, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x02, 0x52, 0x0b, + 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x88, 0x01, 0x01, 0x1a, 0x6c, + 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x64, - 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, + 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x22, 0x72, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x12, 0x59, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x42, 0x08, 0x0a, 0x06, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x97, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4c, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, - 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, - 0x12, 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 
0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x1f, - 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, - 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x22, - 0xee, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x69, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x61, 0x70, 0x69, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, - 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, - 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x73, 0x0a, 0x13, 0x45, - 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x4b, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, + 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xf0, 0x02, + 0x0a, 0x13, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, - 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0xc3, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 
0x6f, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0a, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x0c, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, - 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x48, 0x00, - 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x42, 0x07, 0x0a, - 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4f, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x5a, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, - 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x74, 0x61, 0x67, 0x12, 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x03, - 0x74, 0x74, 0x6c, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x74, 0x74, 0x6c, 0x22, 0x8b, - 0x02, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 
0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x88, + 0x01, 0x01, 0x12, 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x22, 0x1f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, + 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, + 0x67, 0x22, 0xee, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x69, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, - 0x12, 0x52, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, + 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x73, 0x0a, + 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xc3, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, + 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 
0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0a, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, + 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, - 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x1a, 0x66, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, - 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x6e, - 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x61, 0x70, 0x69, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x3b, - 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x42, + 0x07, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4f, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 
0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5a, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, + 0x52, 0x03, 0x74, 0x74, 0x6c, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x74, 0x74, 0x6c, + 0x22, 0x8b, 0x02, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, - 0x65, 0x61, 0x64, 0x79, 0x52, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x1a, 0x44, 0x0a, 0x16, 0x43, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x65, 0x12, 0x52, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x66, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, + 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x6e, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x61, + 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x65, 
0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x68, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x44, 0x0a, 0x08, 0x73, - 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, - 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, - 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, - 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2a, 0x3f, 0x0a, 0x05, 0x52, - 0x65, 0x61, 0x64, 0x79, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x52, - 0x45, 0x41, 0x44, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x52, - 0x45, 0x41, 0x44, 0x59, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x02, 0x2a, 0x63, 0x0a, 0x08, - 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, - 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x46, - 0x41, 0x54, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, - 0x54, 0x59, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, - 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, - 0x03, 0x32, 0x91, 0x01, 0x0a, 0x15, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, - 0x6e, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x78, 0x0a, 0x0b, 0x52, - 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x61, 0x70, 0x69, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, - 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, - 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x63, - 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, - 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x6e, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x12, 0x3b, 
0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x1a, 0x44, 0x0a, + 0x16, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x68, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x44, 0x0a, + 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2a, 0x3f, 0x0a, + 0x05, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x01, 0x12, 0x0f, 0x0a, + 0x0b, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x02, 0x2a, 0x63, + 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, + 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, + 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, 0x56, 0x45, + 0x52, 0x49, 0x54, 0x59, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, + 0x0a, 0x0f, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, + 0x4c, 0x10, 0x03, 0x32, 0x91, 0x01, 0x0a, 0x15, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x78, 0x0a, + 0x0b, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x61, + 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, + 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x33, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 
0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, + 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1134,62 +1286,68 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP() []byte } var file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 19) var file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes = []interface{}{ (Ready)(0), // 0: apiextensions.fn.proto.v1beta1.Ready (Severity)(0), // 1: apiextensions.fn.proto.v1beta1.Severity (*RunFunctionRequest)(nil), // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest - (*Resources)(nil), // 3: apiextensions.fn.proto.v1beta1.Resources - (*RunFunctionResponse)(nil), // 4: apiextensions.fn.proto.v1beta1.RunFunctionResponse - (*RequestMeta)(nil), // 5: apiextensions.fn.proto.v1beta1.RequestMeta - (*Requirements)(nil), // 6: apiextensions.fn.proto.v1beta1.Requirements - (*ResourceSelector)(nil), // 7: apiextensions.fn.proto.v1beta1.ResourceSelector - (*MatchLabels)(nil), // 8: apiextensions.fn.proto.v1beta1.MatchLabels - (*ResponseMeta)(nil), // 9: apiextensions.fn.proto.v1beta1.ResponseMeta - (*State)(nil), // 10: apiextensions.fn.proto.v1beta1.State - (*Resource)(nil), // 11: apiextensions.fn.proto.v1beta1.Resource - (*Result)(nil), // 12: apiextensions.fn.proto.v1beta1.Result - nil, // 13: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry - nil, // 14: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry - nil, // 15: apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry - nil, // 16: apiextensions.fn.proto.v1beta1.State.ResourcesEntry - nil, // 17: apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry - (*structpb.Struct)(nil), // 18: google.protobuf.Struct - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration + (*Credentials)(nil), // 3: apiextensions.fn.proto.v1beta1.Credentials + (*CredentialData)(nil), // 4: apiextensions.fn.proto.v1beta1.CredentialData + (*Resources)(nil), // 5: apiextensions.fn.proto.v1beta1.Resources + (*RunFunctionResponse)(nil), // 6: apiextensions.fn.proto.v1beta1.RunFunctionResponse + (*RequestMeta)(nil), // 7: apiextensions.fn.proto.v1beta1.RequestMeta + (*Requirements)(nil), // 8: apiextensions.fn.proto.v1beta1.Requirements + (*ResourceSelector)(nil), // 9: apiextensions.fn.proto.v1beta1.ResourceSelector + (*MatchLabels)(nil), // 10: apiextensions.fn.proto.v1beta1.MatchLabels + (*ResponseMeta)(nil), // 11: apiextensions.fn.proto.v1beta1.ResponseMeta + (*State)(nil), // 12: apiextensions.fn.proto.v1beta1.State + (*Resource)(nil), // 13: apiextensions.fn.proto.v1beta1.Resource + (*Result)(nil), // 14: apiextensions.fn.proto.v1beta1.Result + nil, // 15: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry + nil, // 16: apiextensions.fn.proto.v1beta1.CredentialData.DataEntry + nil, // 17: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry + nil, // 18: apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry + nil, // 19: apiextensions.fn.proto.v1beta1.State.ResourcesEntry + nil, // 20: apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + 
(*structpb.Struct)(nil), // 21: google.protobuf.Struct + (*durationpb.Duration)(nil), // 22: google.protobuf.Duration } var file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs = []int32{ - 5, // 0: apiextensions.fn.proto.v1beta1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1beta1.RequestMeta - 10, // 1: apiextensions.fn.proto.v1beta1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1beta1.State - 10, // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1beta1.State - 18, // 3: apiextensions.fn.proto.v1beta1.RunFunctionRequest.input:type_name -> google.protobuf.Struct - 18, // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest.context:type_name -> google.protobuf.Struct - 13, // 5: apiextensions.fn.proto.v1beta1.RunFunctionRequest.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry - 11, // 6: apiextensions.fn.proto.v1beta1.Resources.items:type_name -> apiextensions.fn.proto.v1beta1.Resource - 9, // 7: apiextensions.fn.proto.v1beta1.RunFunctionResponse.meta:type_name -> apiextensions.fn.proto.v1beta1.ResponseMeta - 10, // 8: apiextensions.fn.proto.v1beta1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1beta1.State - 12, // 9: apiextensions.fn.proto.v1beta1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1beta1.Result - 18, // 10: apiextensions.fn.proto.v1beta1.RunFunctionResponse.context:type_name -> google.protobuf.Struct - 6, // 11: apiextensions.fn.proto.v1beta1.RunFunctionResponse.requirements:type_name -> apiextensions.fn.proto.v1beta1.Requirements - 14, // 12: apiextensions.fn.proto.v1beta1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry - 8, // 13: apiextensions.fn.proto.v1beta1.ResourceSelector.match_labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels - 15, // 14: apiextensions.fn.proto.v1beta1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry - 19, // 15: apiextensions.fn.proto.v1beta1.ResponseMeta.ttl:type_name -> google.protobuf.Duration - 11, // 16: apiextensions.fn.proto.v1beta1.State.composite:type_name -> apiextensions.fn.proto.v1beta1.Resource - 16, // 17: apiextensions.fn.proto.v1beta1.State.resources:type_name -> apiextensions.fn.proto.v1beta1.State.ResourcesEntry - 18, // 18: apiextensions.fn.proto.v1beta1.Resource.resource:type_name -> google.protobuf.Struct - 17, // 19: apiextensions.fn.proto.v1beta1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry - 0, // 20: apiextensions.fn.proto.v1beta1.Resource.ready:type_name -> apiextensions.fn.proto.v1beta1.Ready - 1, // 21: apiextensions.fn.proto.v1beta1.Result.severity:type_name -> apiextensions.fn.proto.v1beta1.Severity - 3, // 22: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resources - 7, // 23: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.ResourceSelector - 11, // 24: apiextensions.fn.proto.v1beta1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resource - 2, // 25: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1beta1.RunFunctionRequest - 4, // 26: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:output_type -> 
apiextensions.fn.proto.v1beta1.RunFunctionResponse - 26, // [26:27] is the sub-list for method output_type - 25, // [25:26] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name + 7, // 0: apiextensions.fn.proto.v1beta1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1beta1.RequestMeta + 12, // 1: apiextensions.fn.proto.v1beta1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1beta1.State + 12, // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1beta1.State + 21, // 3: apiextensions.fn.proto.v1beta1.RunFunctionRequest.input:type_name -> google.protobuf.Struct + 21, // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest.context:type_name -> google.protobuf.Struct + 15, // 5: apiextensions.fn.proto.v1beta1.RunFunctionRequest.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry + 3, // 6: apiextensions.fn.proto.v1beta1.RunFunctionRequest.credentials:type_name -> apiextensions.fn.proto.v1beta1.Credentials + 4, // 7: apiextensions.fn.proto.v1beta1.Credentials.credential_data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData + 16, // 8: apiextensions.fn.proto.v1beta1.CredentialData.data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData.DataEntry + 13, // 9: apiextensions.fn.proto.v1beta1.Resources.items:type_name -> apiextensions.fn.proto.v1beta1.Resource + 11, // 10: apiextensions.fn.proto.v1beta1.RunFunctionResponse.meta:type_name -> apiextensions.fn.proto.v1beta1.ResponseMeta + 12, // 11: apiextensions.fn.proto.v1beta1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1beta1.State + 14, // 12: apiextensions.fn.proto.v1beta1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1beta1.Result + 21, // 13: apiextensions.fn.proto.v1beta1.RunFunctionResponse.context:type_name -> google.protobuf.Struct + 8, // 14: apiextensions.fn.proto.v1beta1.RunFunctionResponse.requirements:type_name -> apiextensions.fn.proto.v1beta1.Requirements + 17, // 15: apiextensions.fn.proto.v1beta1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry + 10, // 16: apiextensions.fn.proto.v1beta1.ResourceSelector.match_labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels + 18, // 17: apiextensions.fn.proto.v1beta1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry + 22, // 18: apiextensions.fn.proto.v1beta1.ResponseMeta.ttl:type_name -> google.protobuf.Duration + 13, // 19: apiextensions.fn.proto.v1beta1.State.composite:type_name -> apiextensions.fn.proto.v1beta1.Resource + 19, // 20: apiextensions.fn.proto.v1beta1.State.resources:type_name -> apiextensions.fn.proto.v1beta1.State.ResourcesEntry + 21, // 21: apiextensions.fn.proto.v1beta1.Resource.resource:type_name -> google.protobuf.Struct + 20, // 22: apiextensions.fn.proto.v1beta1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + 0, // 23: apiextensions.fn.proto.v1beta1.Resource.ready:type_name -> apiextensions.fn.proto.v1beta1.Ready + 1, // 24: apiextensions.fn.proto.v1beta1.Result.severity:type_name -> apiextensions.fn.proto.v1beta1.Severity + 5, // 25: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resources + 9, // 26: 
apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.ResourceSelector + 13, // 27: apiextensions.fn.proto.v1beta1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resource + 2, // 28: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1beta1.RunFunctionRequest + 6, // 29: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1beta1.RunFunctionResponse + 29, // [29:30] is the sub-list for method output_type + 28, // [28:29] is the sub-list for method input_type + 28, // [28:28] is the sub-list for extension type_name + 28, // [28:28] is the sub-list for extension extendee + 0, // [0:28] is the sub-list for field type_name } func init() { file_apiextensions_fn_proto_v1beta1_run_function_proto_init() } @@ -1211,7 +1369,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { } } file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Resources); i { + switch v := v.(*Credentials); i { case 0: return &v.state case 1: @@ -1223,7 +1381,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { } } file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RunFunctionResponse); i { + switch v := v.(*CredentialData); i { case 0: return &v.state case 1: @@ -1235,7 +1393,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { } } file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestMeta); i { + switch v := v.(*Resources); i { case 0: return &v.state case 1: @@ -1247,7 +1405,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { } } file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Requirements); i { + switch v := v.(*RunFunctionResponse); i { case 0: return &v.state case 1: @@ -1259,7 +1417,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { } } file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceSelector); i { + switch v := v.(*RequestMeta); i { case 0: return &v.state case 1: @@ -1271,7 +1429,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { } } file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MatchLabels); i { + switch v := v.(*Requirements); i { case 0: return &v.state case 1: @@ -1283,7 +1441,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { } } file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseMeta); i { + switch v := v.(*ResourceSelector); i { case 0: return &v.state case 1: @@ -1295,7 +1453,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { } } file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*State); i { + switch v := v.(*MatchLabels); i { case 0: return &v.state case 1: @@ -1307,7 +1465,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { } } 
 	file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Resource); i {
+			switch v := v.(*ResponseMeta); i {
 			case 0:
 				return &v.state
 			case 1:
 				return &v.sizeCache
 			case 2:
 				return &v.unknownFields
 			default:
 				return nil
 			}
 		}
@@ -1319,6 +1477,30 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() {
 			}
 		}
 		file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*State); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Resource); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*Result); i {
 			case 0:
 				return &v.state
@@ -1332,19 +1514,22 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() {
 			}
 		}
 	}
 	file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0].OneofWrappers = []interface{}{}
-	file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2].OneofWrappers = []interface{}{}
-	file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5].OneofWrappers = []interface{}{
+	file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1].OneofWrappers = []interface{}{
+		(*Credentials_CredentialData)(nil),
+	}
+	file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4].OneofWrappers = []interface{}{}
+	file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7].OneofWrappers = []interface{}{
 		(*ResourceSelector_MatchName)(nil),
 		(*ResourceSelector_MatchLabels)(nil),
 	}
-	file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7].OneofWrappers = []interface{}{}
+	file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9].OneofWrappers = []interface{}{}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc,
 			NumEnums:      2,
-			NumMessages:   16,
+			NumMessages:   19,
 			NumExtensions: 0,
 			NumServices:   1,
 		},
diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.proto b/apis/apiextensions/fn/proto/v1beta1/run_function.proto
index 0f53ef19f..bc8eba5ff 100644
--- a/apis/apiextensions/fn/proto/v1beta1/run_function.proto
+++ b/apis/apiextensions/fn/proto/v1beta1/run_function.proto
@@ -68,6 +68,24 @@ message RunFunctionRequest {
   // did not exist, Crossplane sets the map key to an empty Resources message to
   // indicate that it attempted to satisfy the request.
   map<string, Resources> extra_resources = 6;
+
+  // Optional credentials that this Function may use to communicate with an
+  // external system.
+  optional Credentials credentials = 7;
+}
+
+// Credentials that a Function may use to communicate with an external system.
+message Credentials {
+  // Source of the credentials.
+  oneof source {
+    // Credential data loaded by Crossplane, for example from a Secret.
+    CredentialData credential_data = 1;
+  }
+}
+
+// CredentialData loaded by Crossplane, for example from a Secret.
+message CredentialData {
+  map<string, bytes> data = 1;
 }
 
 // Resources represents the state of several Crossplane resources.
diff --git a/apis/apiextensions/v1/composition_common.go b/apis/apiextensions/v1/composition_common.go index 14982abd1..73e5725dd 100644 --- a/apis/apiextensions/v1/composition_common.go +++ b/apis/apiextensions/v1/composition_common.go @@ -298,6 +298,9 @@ type PipelineStep struct { // +kubebuilder:pruning:PreserveUnknownFields // +kubebuilder:validation:EmbeddedResource Input *runtime.RawExtension `json:"input,omitempty"` + + // Credentials are optional credentials that the Composition Function needs. + Credentials *FunctionCredentials `json:"credentials,omitempty"` } // A FunctionReference references a Composition Function that may be used in a @@ -307,6 +310,33 @@ type FunctionReference struct { Name string `json:"name"` } +// FunctionCredentials are optional credentials that a Composition Function +// needs to run. +type FunctionCredentials struct { + // Source of the function credentials. + // +kubebuilder:validation:Enum=None;Secret + Source FunctionCredentialsSource `json:"source"` + + // A SecretRef is a reference to a secret key that contains the credentials + // that must be used to connect to the provider. + // +optional + SecretRef *xpv1.SecretReference `json:"secretRef,omitempty"` +} + +// A FunctionCredentialsSource is a source from which Composition Function +// credentials may be acquired. +type FunctionCredentialsSource string + +const ( + // FunctionCredentialsSourceNone indicates that a function does not require + // credentials. + FunctionCredentialsSourceNone FunctionCredentialsSource = "None" + + // FunctionCredentialsSourceSecret indicates that a function should acquire + // credentials from a secret. + FunctionCredentialsSourceSecret FunctionCredentialsSource = "Secret" +) + // A StoreConfigReference references a secret store config that may be used to // write connection details. 
type StoreConfigReference struct { diff --git a/apis/apiextensions/v1/zz_generated.conversion.go b/apis/apiextensions/v1/zz_generated.conversion.go old mode 100755 new mode 100644 index d843ef6ae..a96750c78 --- a/apis/apiextensions/v1/zz_generated.conversion.go +++ b/apis/apiextensions/v1/zz_generated.conversion.go @@ -211,6 +211,16 @@ func (c *GeneratedRevisionSpecConverter) pV1EnvironmentSourceSelectorToPV1Enviro } return pV1EnvironmentSourceSelector } +func (c *GeneratedRevisionSpecConverter) pV1FunctionCredentialsToPV1FunctionCredentials(source *FunctionCredentials) *FunctionCredentials { + var pV1FunctionCredentials *FunctionCredentials + if source != nil { + var v1FunctionCredentials FunctionCredentials + v1FunctionCredentials.Source = FunctionCredentialsSource((*source).Source) + v1FunctionCredentials.SecretRef = c.pV1SecretReferenceToPV1SecretReference((*source).SecretRef) + pV1FunctionCredentials = &v1FunctionCredentials + } + return pV1FunctionCredentials +} func (c *GeneratedRevisionSpecConverter) pV1MapTransformToPV1MapTransform(source *MapTransform) *MapTransform { var pV1MapTransform *MapTransform if source != nil { @@ -337,6 +347,16 @@ func (c *GeneratedRevisionSpecConverter) pV1PolicyToPV1Policy(source *v11.Policy } return pV1Policy } +func (c *GeneratedRevisionSpecConverter) pV1SecretReferenceToPV1SecretReference(source *v11.SecretReference) *v11.SecretReference { + var pV1SecretReference *v11.SecretReference + if source != nil { + var v1SecretReference v11.SecretReference + v1SecretReference.Name = (*source).Name + v1SecretReference.Namespace = (*source).Namespace + pV1SecretReference = &v1SecretReference + } + return pV1SecretReference +} func (c *GeneratedRevisionSpecConverter) pV1StoreConfigReferenceToPV1StoreConfigReference(source *StoreConfigReference) *StoreConfigReference { var pV1StoreConfigReference *StoreConfigReference if source != nil { @@ -626,6 +646,7 @@ func (c *GeneratedRevisionSpecConverter) v1PipelineStepToV1PipelineStep(source P v1PipelineStep.Step = source.Step v1PipelineStep.FunctionRef = c.v1FunctionReferenceToV1FunctionReference(source.FunctionRef) v1PipelineStep.Input = c.pRuntimeRawExtensionToPRuntimeRawExtension(source.Input) + v1PipelineStep.Credentials = c.pV1FunctionCredentialsToPV1FunctionCredentials(source.Credentials) return v1PipelineStep } func (c *GeneratedRevisionSpecConverter) v1ReadinessCheckToV1ReadinessCheck(source ReadinessCheck) ReadinessCheck { diff --git a/apis/apiextensions/v1/zz_generated.deepcopy.go b/apis/apiextensions/v1/zz_generated.deepcopy.go index ac1d21cfe..d3595b903 100644 --- a/apis/apiextensions/v1/zz_generated.deepcopy.go +++ b/apis/apiextensions/v1/zz_generated.deepcopy.go @@ -851,6 +851,26 @@ func (in *EnvironmentSourceSelectorLabelMatcher) DeepCopy() *EnvironmentSourceSe return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionCredentials) DeepCopyInto(out *FunctionCredentials) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(commonv1.SecretReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionCredentials. +func (in *FunctionCredentials) DeepCopy() *FunctionCredentials { + if in == nil { + return nil + } + out := new(FunctionCredentials) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FunctionReference) DeepCopyInto(out *FunctionReference) { *out = *in @@ -1100,6 +1120,11 @@ func (in *PipelineStep) DeepCopyInto(out *PipelineStep) { *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(FunctionCredentials) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStep. diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_common.go b/apis/apiextensions/v1beta1/zz_generated.composition_common.go index edc389696..c4a8f7eff 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_common.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_common.go @@ -300,6 +300,9 @@ type PipelineStep struct { // +kubebuilder:pruning:PreserveUnknownFields // +kubebuilder:validation:EmbeddedResource Input *runtime.RawExtension `json:"input,omitempty"` + + // Credentials are optional credentials that the Composition Function needs. + Credentials *FunctionCredentials `json:"credentials,omitempty"` } // A FunctionReference references a Composition Function that may be used in a @@ -309,6 +312,33 @@ type FunctionReference struct { Name string `json:"name"` } +// FunctionCredentials are optional credentials that a Composition Function +// needs to run. +type FunctionCredentials struct { + // Source of the function credentials. + // +kubebuilder:validation:Enum=None;Secret + Source FunctionCredentialsSource `json:"source"` + + // A SecretRef is a reference to a secret key that contains the credentials + // that must be used to connect to the provider. + // +optional + SecretRef *xpv1.SecretReference `json:"secretRef,omitempty"` +} + +// A FunctionCredentialsSource is a source from which Composition Function +// credentials may be acquired. +type FunctionCredentialsSource string + +const ( + // FunctionCredentialsSourceNone indicates that a function does not require + // credentials. + FunctionCredentialsSourceNone FunctionCredentialsSource = "None" + + // FunctionCredentialsSourceSecret indicates that a function should acquire + // credentials from a secret. + FunctionCredentialsSourceSecret FunctionCredentialsSource = "Secret" +) + // A StoreConfigReference references a secret store config that may be used to // write connection details. type StoreConfigReference struct { diff --git a/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index e3cd28ed5..4df4dd964 100644 --- a/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -21,8 +21,8 @@ limitations under the License. 
package v1beta1 import ( - commonv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "github.com/crossplane/crossplane-runtime/apis/common/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -305,7 +305,7 @@ func (in *EnvironmentConfiguration) DeepCopyInto(out *EnvironmentConfiguration) *out = *in if in.DefaultData != nil { in, out := &in.DefaultData, &out.DefaultData - *out = make(map[string]v1.JSON, len(*in)) + *out = make(map[string]apiextensionsv1.JSON, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -326,7 +326,7 @@ func (in *EnvironmentConfiguration) DeepCopyInto(out *EnvironmentConfiguration) } if in.Policy != nil { in, out := &in.Policy, &out.Policy - *out = new(commonv1.Policy) + *out = new(v1.Policy) (*in).DeepCopyInto(*out) } } @@ -485,6 +485,26 @@ func (in *EnvironmentSourceSelectorLabelMatcher) DeepCopy() *EnvironmentSourceSe return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionCredentials) DeepCopyInto(out *FunctionCredentials) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.SecretReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionCredentials. +func (in *FunctionCredentials) DeepCopy() *FunctionCredentials { + if in == nil { + return nil + } + out := new(FunctionCredentials) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FunctionReference) DeepCopyInto(out *FunctionReference) { *out = *in @@ -505,7 +525,7 @@ func (in *MapTransform) DeepCopyInto(out *MapTransform) { *out = *in if in.Pairs != nil { in, out := &in.Pairs, &out.Pairs - *out = make(map[string]v1.JSON, len(*in)) + *out = make(map[string]apiextensionsv1.JSON, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -673,7 +693,7 @@ func (in *PatchPolicy) DeepCopyInto(out *PatchPolicy) { } if in.MergeOptions != nil { in, out := &in.MergeOptions, &out.MergeOptions - *out = new(commonv1.MergeOptions) + *out = new(v1.MergeOptions) (*in).DeepCopyInto(*out) } } @@ -719,6 +739,11 @@ func (in *PipelineStep) DeepCopyInto(out *PipelineStep) { *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(FunctionCredentials) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStep. diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index b2996800d..9477187dc 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -991,6 +991,34 @@ spec: items: description: A PipelineStep in a Composition Function pipeline. properties: + credentials: + description: Credentials are optional credentials that the Composition + Function needs. + properties: + secretRef: + description: |- + A SecretRef is a reference to a secret key that contains the credentials + that must be used to connect to the provider. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + source: + description: Source of the function credentials. + enum: + - None + - Secret + type: string + required: + - source + type: object functionRef: description: |- FunctionRef is a reference to the Composition Function this step should @@ -2577,6 +2605,34 @@ spec: items: description: A PipelineStep in a Composition Function pipeline. properties: + credentials: + description: Credentials are optional credentials that the Composition + Function needs. + properties: + secretRef: + description: |- + A SecretRef is a reference to a secret key that contains the credentials + that must be used to connect to the provider. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + source: + description: Source of the function credentials. + enum: + - None + - Secret + type: string + required: + - source + type: object functionRef: description: |- FunctionRef is a reference to the Composition Function this step should diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index 0a0b8cdff..f2235682c 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -986,6 +986,34 @@ spec: items: description: A PipelineStep in a Composition Function pipeline. properties: + credentials: + description: Credentials are optional credentials that the Composition + Function needs. + properties: + secretRef: + description: |- + A SecretRef is a reference to a secret key that contains the credentials + that must be used to connect to the provider. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + source: + description: Source of the function credentials. + enum: + - None + - Secret + type: string + required: + - source + type: object functionRef: description: |- FunctionRef is a reference to the Composition Function this step should diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index 379168036..1eae74c83 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -68,6 +68,7 @@ const ( errFmtApplyCD = "cannot apply composed resource %q" errFmtFetchCDConnectionDetails = "cannot fetch connection details for composed resource %q (a %s named %s)" errFmtUnmarshalPipelineStepInput = "cannot unmarshal input for Composition pipeline step %q" + errFmtGetCredentialsFromSecret = "cannot get Composition pipeline step %q credentials from Secret" errFmtRunPipelineStep = "cannot run Composition pipeline step %q" errFmtDeleteCD = "cannot delete composed resource %q (a %s named %s)" errFmtUnmarshalDesiredCD = "cannot unmarshal desired composed resource %q from RunFunctionResponse" @@ -301,6 +302,21 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur req.Input = in } + // For now we only support loading credentials from a Kubernetes secret. 
+ if cs := fn.Credentials; cs != nil && cs.SecretRef != nil { + s := &corev1.Secret{} + if err := c.client.Get(ctx, client.ObjectKey{Namespace: cs.SecretRef.Namespace, Name: cs.SecretRef.Name}, s); err != nil { + return CompositionResult{}, errors.Wrapf(err, errFmtGetCredentialsFromSecret, fn.Step) + } + req.Credentials = &v1beta1.Credentials{ + Source: &v1beta1.Credentials_CredentialData{ + CredentialData: &v1beta1.CredentialData{ + Data: s.Data, + }, + }, + } + } + // Used to store the requirements returned at the previous iteration. var requirements *v1beta1.Requirements // Used to store the response of the function at the previous iteration. diff --git a/internal/controller/apiextensions/composite/composition_functions_test.go b/internal/controller/apiextensions/composite/composition_functions_test.go index 400b569b4..663803899 100644 --- a/internal/controller/apiextensions/composite/composition_functions_test.go +++ b/internal/controller/apiextensions/composite/composition_functions_test.go @@ -35,6 +35,7 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/event" "github.com/crossplane/crossplane-runtime/pkg/meta" @@ -148,6 +149,48 @@ func TestFunctionCompose(t *testing.T) { err: errors.Wrapf(errProtoSyntax, errFmtUnmarshalPipelineStepInput, "run-cool-function"), }, }, + "GetCredentialsSecretError": { + reason: "We should return any error encountered while getting the credentials secret for a Composition Function", + params: params{ + kube: &test.MockClient{ + // Return an error when we try to get the secret. + MockGet: test.NewMockGetFn(errBoom), + }, + o: []FunctionComposerOption{ + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + return nil, nil + })), + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { + return nil, nil + })), + }, + }, + args: args{ + xr: composite.New(), + req: CompositionRequest{ + Revision: &v1.CompositionRevision{ + Spec: v1.CompositionRevisionSpec{ + Pipeline: []v1.PipelineStep{ + { + Step: "run-cool-function", + FunctionRef: v1.FunctionReference{Name: "cool-function"}, + Credentials: &v1.FunctionCredentials{ + Source: v1.FunctionCredentialsSourceSecret, + SecretRef: &xpv1.SecretReference{ + Namespace: "default", + Name: "cool-secret", + }, + }, + }, + }, + }, + }, + }, + }, + want: want{ + err: errors.Wrapf(errBoom, errFmtGetCredentialsFromSecret, "run-cool-function"), + }, + }, "RunFunctionError": { reason: "We should return any error encountered while running a Composition Function", params: params{ @@ -515,7 +558,23 @@ func TestFunctionCompose(t *testing.T) { reason: "We should return a valid CompositionResult when a 'pure Function' (i.e. 
patch-and-transform-less) reconcile succeeds", params: params{ kube: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{Resource: "UncoolComposed"}, "")), // all names are available + // MockGet: + // test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{Resource: + // "UncoolComposed"}, "")), // all names are available + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + if s, ok := obj.(*corev1.Secret); ok { + s.Data = map[string][]byte{ + "secret": []byte("password"), + } + return nil + } + + // If this isn't a secret, it's a composed resource. + // Return not found to indicate its name is available. + // TODO(negz): This is "testing through" to the + // names.NameGenerator implementation. Mock it out. + return kerrors.NewNotFound(schema.GroupResource{}, "") + }), MockPatch: test.NewMockPatchFn(nil), MockStatusPatch: test.NewMockSubResourcePatchFn(nil), }, @@ -606,6 +665,13 @@ func TestFunctionCompose(t *testing.T) { { Step: "run-cool-function", FunctionRef: v1.FunctionReference{Name: "cool-function"}, + Credentials: &v1.FunctionCredentials{ + Source: v1.FunctionCredentialsSourceSecret, + SecretRef: &xpv1.SecretReference{ + Namespace: "default", + Name: "cool-secret", + }, + }, }, }, }, diff --git a/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml b/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml index 1ccd44a9e..fd0fdf44c 100644 --- a/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml +++ b/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml @@ -36,6 +36,11 @@ spec: results: - severity: SEVERITY_NORMAL message: "I am doing a compose!" + credentials: + source: Secret + secretRef: + namespace: crossplane-system + name: super-secret - step: detect-readiness functionRef: name: function-auto-ready diff --git a/test/e2e/manifests/apiextensions/composition/functions/setup/secret.yaml b/test/e2e/manifests/apiextensions/composition/functions/setup/secret.yaml new file mode 100644 index 000000000..22e00c9ac --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/functions/setup/secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + namespace: crossplane-system + name: super-secret +# We don't actually use the data, we just want to make sure we can load the +# secret. +data: {} \ No newline at end of file From 2386c093684c8f24916e0d3c3456d7e7aef6d210 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 3 Apr 2024 04:38:38 -0700 Subject: [PATCH 142/370] Add comments to ResourceSelector message These are all fairly obvious, but we try to add comments for all fields. The only exceptions I've left are wrapper messages with singleton fields (e.g. MatchLabels, Resources, CredentialData). Signed-off-by: Nic Cope --- apis/apiextensions/fn/proto/v1beta1/run_function.pb.go | 8 +++++++- apis/apiextensions/fn/proto/v1beta1/run_function.proto | 7 +++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go index d2ef33ef0..ad7c6ce1b 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go @@ -643,8 +643,12 @@ type ResourceSelector struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // API version of resources to select. 
ApiVersion string `protobuf:"bytes,1,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"` - Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + // Kind of resources to select. + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + // Resources to match. + // // Types that are assignable to Match: // // *ResourceSelector_MatchName @@ -724,10 +728,12 @@ type isResourceSelector_Match interface { } type ResourceSelector_MatchName struct { + // Match the resource with this name. MatchName string `protobuf:"bytes,3,opt,name=match_name,json=matchName,proto3,oneof"` } type ResourceSelector_MatchLabels struct { + // Match all resources with these labels. MatchLabels *MatchLabels `protobuf:"bytes,4,opt,name=match_labels,json=matchLabels,proto3,oneof"` } diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.proto b/apis/apiextensions/fn/proto/v1beta1/run_function.proto index bc8eba5ff..591b9310e 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.proto +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.proto @@ -139,11 +139,18 @@ message Requirements { // ResourceSelector selects a group of resources, either by name or by label. message ResourceSelector { + // API version of resources to select. string api_version = 1; + + // Kind of resources to select. string kind = 2; + // Resources to match. oneof match { + // Match the resource with this name. string match_name = 3; + + // Match all resources with these labels. MatchLabels match_labels = 4; } } From 85e1a348da98743e3bf5fe0cd8ddc97d03fa5b5f Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 23 Apr 2024 18:55:12 -0700 Subject: [PATCH 143/370] Support passing multiple credentials to each function We think it's likely that a function will need more than one credential. In particular, a function could need credentials from different sources. Signed-off-by: Nic Cope --- .../fn/proto/v1beta1/run_function.pb.go | 99 ++++++++------- .../fn/proto/v1beta1/run_function.proto | 2 +- apis/apiextensions/v1/composition_common.go | 10 +- .../v1/composition_validation.go | 17 +++ .../v1/composition_validation_test.go | 62 +++++++++ .../v1/zz_generated.conversion.go | 26 ++-- .../apiextensions/v1/zz_generated.deepcopy.go | 6 +- .../zz_generated.composition_common.go | 10 +- .../v1beta1/zz_generated.deepcopy.go | 6 +- ...ns.crossplane.io_compositionrevisions.yaml | 118 ++++++++++-------- ...extensions.crossplane.io_compositions.yaml | 59 +++++---- .../composite/composition_functions.go | 16 ++- .../composite/composition_functions_test.go | 31 ++--- .../functions/setup/composition.yaml | 1 + 14 files changed, 302 insertions(+), 161 deletions(-) diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go index ad7c6ce1b..79250bc74 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go @@ -190,7 +190,7 @@ type RunFunctionRequest struct { ExtraResources map[string]*Resources `protobuf:"bytes,6,rep,name=extra_resources,json=extraResources,proto3" json:"extra_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Optional credentials that this Function may use to communicate with an // external system. 
- Credentials *Credentials `protobuf:"bytes,7,opt,name=credentials,proto3,oneof" json:"credentials,omitempty"` + Credentials map[string]*Credentials `protobuf:"bytes,7,rep,name=credentials,proto3" json:"credentials,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *RunFunctionRequest) Reset() { @@ -267,7 +267,7 @@ func (x *RunFunctionRequest) GetExtraResources() map[string]*Resources { return nil } -func (x *RunFunctionRequest) GetCredentials() *Credentials { +func (x *RunFunctionRequest) GetCredentials() map[string]*Credentials { if x != nil { return x.Credentials } @@ -1077,7 +1077,7 @@ var file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = []byte{ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x9e, 0x05, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6f, 0x22, 0x8e, 0x06, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, @@ -1104,22 +1104,29 @@ var file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = []byte{ 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x72, - 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0b, 0x63, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x65, 0x0a, 0x0b, 0x63, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x43, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, - 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x02, 0x52, 0x0b, - 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x88, 0x01, 0x01, 0x1a, 0x6c, - 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 
0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x1a, 0x6c, 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x6b, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x22, 0x72, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x78, 0x74, 0x22, 0x72, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x59, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, @@ -1292,7 +1299,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP() []byte } var file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 20) var file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes = []interface{}{ (Ready)(0), // 0: apiextensions.fn.proto.v1beta1.Ready (Severity)(0), // 1: apiextensions.fn.proto.v1beta1.Severity @@ -1310,50 +1317,52 @@ var file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes = []interface (*Resource)(nil), // 13: apiextensions.fn.proto.v1beta1.Resource (*Result)(nil), // 14: apiextensions.fn.proto.v1beta1.Result nil, // 15: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry - nil, // 16: apiextensions.fn.proto.v1beta1.CredentialData.DataEntry - nil, // 17: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry - nil, // 18: apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry - nil, // 19: apiextensions.fn.proto.v1beta1.State.ResourcesEntry - nil, // 20: apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry - (*structpb.Struct)(nil), // 21: google.protobuf.Struct - (*durationpb.Duration)(nil), // 22: 
google.protobuf.Duration + nil, // 16: apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry + nil, // 17: apiextensions.fn.proto.v1beta1.CredentialData.DataEntry + nil, // 18: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry + nil, // 19: apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry + nil, // 20: apiextensions.fn.proto.v1beta1.State.ResourcesEntry + nil, // 21: apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + (*structpb.Struct)(nil), // 22: google.protobuf.Struct + (*durationpb.Duration)(nil), // 23: google.protobuf.Duration } var file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs = []int32{ 7, // 0: apiextensions.fn.proto.v1beta1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1beta1.RequestMeta 12, // 1: apiextensions.fn.proto.v1beta1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1beta1.State 12, // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1beta1.State - 21, // 3: apiextensions.fn.proto.v1beta1.RunFunctionRequest.input:type_name -> google.protobuf.Struct - 21, // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest.context:type_name -> google.protobuf.Struct + 22, // 3: apiextensions.fn.proto.v1beta1.RunFunctionRequest.input:type_name -> google.protobuf.Struct + 22, // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest.context:type_name -> google.protobuf.Struct 15, // 5: apiextensions.fn.proto.v1beta1.RunFunctionRequest.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry - 3, // 6: apiextensions.fn.proto.v1beta1.RunFunctionRequest.credentials:type_name -> apiextensions.fn.proto.v1beta1.Credentials + 16, // 6: apiextensions.fn.proto.v1beta1.RunFunctionRequest.credentials:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry 4, // 7: apiextensions.fn.proto.v1beta1.Credentials.credential_data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData - 16, // 8: apiextensions.fn.proto.v1beta1.CredentialData.data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData.DataEntry + 17, // 8: apiextensions.fn.proto.v1beta1.CredentialData.data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData.DataEntry 13, // 9: apiextensions.fn.proto.v1beta1.Resources.items:type_name -> apiextensions.fn.proto.v1beta1.Resource 11, // 10: apiextensions.fn.proto.v1beta1.RunFunctionResponse.meta:type_name -> apiextensions.fn.proto.v1beta1.ResponseMeta 12, // 11: apiextensions.fn.proto.v1beta1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1beta1.State 14, // 12: apiextensions.fn.proto.v1beta1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1beta1.Result - 21, // 13: apiextensions.fn.proto.v1beta1.RunFunctionResponse.context:type_name -> google.protobuf.Struct + 22, // 13: apiextensions.fn.proto.v1beta1.RunFunctionResponse.context:type_name -> google.protobuf.Struct 8, // 14: apiextensions.fn.proto.v1beta1.RunFunctionResponse.requirements:type_name -> apiextensions.fn.proto.v1beta1.Requirements - 17, // 15: apiextensions.fn.proto.v1beta1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry + 18, // 15: apiextensions.fn.proto.v1beta1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry 10, // 16: apiextensions.fn.proto.v1beta1.ResourceSelector.match_labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels - 18, // 17: 
apiextensions.fn.proto.v1beta1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry - 22, // 18: apiextensions.fn.proto.v1beta1.ResponseMeta.ttl:type_name -> google.protobuf.Duration + 19, // 17: apiextensions.fn.proto.v1beta1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry + 23, // 18: apiextensions.fn.proto.v1beta1.ResponseMeta.ttl:type_name -> google.protobuf.Duration 13, // 19: apiextensions.fn.proto.v1beta1.State.composite:type_name -> apiextensions.fn.proto.v1beta1.Resource - 19, // 20: apiextensions.fn.proto.v1beta1.State.resources:type_name -> apiextensions.fn.proto.v1beta1.State.ResourcesEntry - 21, // 21: apiextensions.fn.proto.v1beta1.Resource.resource:type_name -> google.protobuf.Struct - 20, // 22: apiextensions.fn.proto.v1beta1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + 20, // 20: apiextensions.fn.proto.v1beta1.State.resources:type_name -> apiextensions.fn.proto.v1beta1.State.ResourcesEntry + 22, // 21: apiextensions.fn.proto.v1beta1.Resource.resource:type_name -> google.protobuf.Struct + 21, // 22: apiextensions.fn.proto.v1beta1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry 0, // 23: apiextensions.fn.proto.v1beta1.Resource.ready:type_name -> apiextensions.fn.proto.v1beta1.Ready 1, // 24: apiextensions.fn.proto.v1beta1.Result.severity:type_name -> apiextensions.fn.proto.v1beta1.Severity 5, // 25: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resources - 9, // 26: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.ResourceSelector - 13, // 27: apiextensions.fn.proto.v1beta1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resource - 2, // 28: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1beta1.RunFunctionRequest - 6, // 29: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1beta1.RunFunctionResponse - 29, // [29:30] is the sub-list for method output_type - 28, // [28:29] is the sub-list for method input_type - 28, // [28:28] is the sub-list for extension type_name - 28, // [28:28] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 3, // 26: apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Credentials + 9, // 27: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.ResourceSelector + 13, // 28: apiextensions.fn.proto.v1beta1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resource + 2, // 29: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1beta1.RunFunctionRequest + 6, // 30: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1beta1.RunFunctionResponse + 30, // [30:31] is the sub-list for method output_type + 29, // [29:30] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 29, // [29:29] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_apiextensions_fn_proto_v1beta1_run_function_proto_init() } @@ -1535,7 +1544,7 @@ 
func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() {
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc,
 			NumEnums:      2,
-			NumMessages:   19,
+			NumMessages:   20,
 			NumExtensions: 0,
 			NumServices:   1,
 		},
diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.proto b/apis/apiextensions/fn/proto/v1beta1/run_function.proto
index 591b9310e..2a6e0b1ef 100644
--- a/apis/apiextensions/fn/proto/v1beta1/run_function.proto
+++ b/apis/apiextensions/fn/proto/v1beta1/run_function.proto
@@ -71,7 +71,7 @@ message RunFunctionRequest {
 
   // Optional credentials that this Function may use to communicate with an
   // external system.
-  optional Credentials credentials = 7;
+  map<string, Credentials> credentials = 7;
 }
 
 // Credentials that a Function may use to communicate with an external system.
diff --git a/apis/apiextensions/v1/composition_common.go b/apis/apiextensions/v1/composition_common.go
index 73e5725dd..27fa223f1 100644
--- a/apis/apiextensions/v1/composition_common.go
+++ b/apis/apiextensions/v1/composition_common.go
@@ -300,7 +300,8 @@ type PipelineStep struct {
 	Input *runtime.RawExtension `json:"input,omitempty"`
 
 	// Credentials are optional credentials that the Composition Function needs.
-	Credentials *FunctionCredentials `json:"credentials,omitempty"`
+	// +optional
+	Credentials []FunctionCredentials `json:"credentials,omitempty"`
 }
 
 // A FunctionReference references a Composition Function that may be used in a
@@ -313,12 +314,15 @@ type FunctionReference struct {
 // FunctionCredentials are optional credentials that a Composition Function
 // needs to run.
 type FunctionCredentials struct {
+	// Name of this set of credentials.
+	Name string `json:"name"`
+
 	// Source of the function credentials.
 	// +kubebuilder:validation:Enum=None;Secret
 	Source FunctionCredentialsSource `json:"source"`
 
-	// A SecretRef is a reference to a secret key that contains the credentials
-	// that must be used to connect to the provider.
+	// A SecretRef is a reference to a secret containing credentials that should
+	// be supplied to the function.
 	// +optional
 	SecretRef *xpv1.SecretReference `json:"secretRef,omitempty"`
 }
diff --git a/apis/apiextensions/v1/composition_validation.go b/apis/apiextensions/v1/composition_validation.go
index 8dd7285fa..bdbd15961 100644
--- a/apis/apiextensions/v1/composition_validation.go
+++ b/apis/apiextensions/v1/composition_validation.go
@@ -69,6 +69,23 @@ func (c *Composition) validatePipeline() (errs field.ErrorList) {
 			errs = append(errs, field.Duplicate(field.NewPath("spec", "pipeline").Index(i).Child("step"), f.Step))
 		}
 		seen[f.Step] = true
+
+		seenCred := map[string]bool{}
+		for j, cs := range f.Credentials {
+			if seenCred[cs.Name] {
+				errs = append(errs, field.Duplicate(field.NewPath("spec", "pipeline").Index(i).Child("credentials").Index(j).Child("name"), cs.Name))
+			}
+			seenCred[cs.Name] = true
+
+			switch cs.Source {
+			case FunctionCredentialsSourceSecret:
+				if cs.SecretRef == nil {
+					errs = append(errs, field.Required(field.NewPath("spec", "pipeline").Index(i).Child("credentials").Index(j).Child("secretRef"), "must be specified when source is Secret"))
+				}
+			case FunctionCredentialsSourceNone:
+				// No requirements here.
+ } + } } return errs } diff --git a/apis/apiextensions/v1/composition_validation_test.go b/apis/apiextensions/v1/composition_validation_test.go index 8c8202f83..787ef6a21 100644 --- a/apis/apiextensions/v1/composition_validation_test.go +++ b/apis/apiextensions/v1/composition_validation_test.go @@ -510,6 +510,68 @@ func TestCompositionValidatePipeline(t *testing.T) { }, }, }, + "InvalidDuplicateCredentialNames": { + reason: "A step's credential names must be unique", + args: args{ + comp: &Composition{ + Spec: CompositionSpec{ + Mode: ptr.To(CompositionModePipeline), + Pipeline: []PipelineStep{ + { + Step: "duplicate-creds", + Credentials: []FunctionCredentials{ + { + Name: "foo", + }, + { + Name: "foo", + }, + }, + }, + }, + }, + }, + }, + want: want{ + output: field.ErrorList{ + { + Type: field.ErrorTypeDuplicate, + Field: "spec.pipeline[0].credentials[1].name", + BadValue: "foo", + }, + }, + }, + }, + + "InvalidMissingSecretRef": { + reason: "A step's credential must specify a secretRef if its source is a secret", + args: args{ + comp: &Composition{ + Spec: CompositionSpec{ + Mode: ptr.To(CompositionModePipeline), + Pipeline: []PipelineStep{ + { + Step: "duplicate-creds", + Credentials: []FunctionCredentials{ + { + Name: "foo", + Source: FunctionCredentialsSourceSecret, + }, + }, + }, + }, + }, + }, + }, + want: want{ + output: field.ErrorList{ + { + Type: field.ErrorTypeRequired, + Field: "spec.pipeline[0].credentials[0].secretRef", + }, + }, + }, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { diff --git a/apis/apiextensions/v1/zz_generated.conversion.go b/apis/apiextensions/v1/zz_generated.conversion.go index a96750c78..fbd5113a7 100644 --- a/apis/apiextensions/v1/zz_generated.conversion.go +++ b/apis/apiextensions/v1/zz_generated.conversion.go @@ -211,16 +211,6 @@ func (c *GeneratedRevisionSpecConverter) pV1EnvironmentSourceSelectorToPV1Enviro } return pV1EnvironmentSourceSelector } -func (c *GeneratedRevisionSpecConverter) pV1FunctionCredentialsToPV1FunctionCredentials(source *FunctionCredentials) *FunctionCredentials { - var pV1FunctionCredentials *FunctionCredentials - if source != nil { - var v1FunctionCredentials FunctionCredentials - v1FunctionCredentials.Source = FunctionCredentialsSource((*source).Source) - v1FunctionCredentials.SecretRef = c.pV1SecretReferenceToPV1SecretReference((*source).SecretRef) - pV1FunctionCredentials = &v1FunctionCredentials - } - return pV1FunctionCredentials -} func (c *GeneratedRevisionSpecConverter) pV1MapTransformToPV1MapTransform(source *MapTransform) *MapTransform { var pV1MapTransform *MapTransform if source != nil { @@ -560,6 +550,13 @@ func (c *GeneratedRevisionSpecConverter) v1EnvironmentSourceToV1EnvironmentSourc v1EnvironmentSource.Selector = c.pV1EnvironmentSourceSelectorToPV1EnvironmentSourceSelector(source.Selector) return v1EnvironmentSource } +func (c *GeneratedRevisionSpecConverter) v1FunctionCredentialsToV1FunctionCredentials(source FunctionCredentials) FunctionCredentials { + var v1FunctionCredentials FunctionCredentials + v1FunctionCredentials.Name = source.Name + v1FunctionCredentials.Source = FunctionCredentialsSource(source.Source) + v1FunctionCredentials.SecretRef = c.pV1SecretReferenceToPV1SecretReference(source.SecretRef) + return v1FunctionCredentials +} func (c *GeneratedRevisionSpecConverter) v1FunctionReferenceToV1FunctionReference(source FunctionReference) FunctionReference { var v1FunctionReference FunctionReference v1FunctionReference.Name = source.Name @@ -646,7 +643,14 @@ func (c 
*GeneratedRevisionSpecConverter) v1PipelineStepToV1PipelineStep(source P v1PipelineStep.Step = source.Step v1PipelineStep.FunctionRef = c.v1FunctionReferenceToV1FunctionReference(source.FunctionRef) v1PipelineStep.Input = c.pRuntimeRawExtensionToPRuntimeRawExtension(source.Input) - v1PipelineStep.Credentials = c.pV1FunctionCredentialsToPV1FunctionCredentials(source.Credentials) + var v1FunctionCredentialsList []FunctionCredentials + if source.Credentials != nil { + v1FunctionCredentialsList = make([]FunctionCredentials, len(source.Credentials)) + for i := 0; i < len(source.Credentials); i++ { + v1FunctionCredentialsList[i] = c.v1FunctionCredentialsToV1FunctionCredentials(source.Credentials[i]) + } + } + v1PipelineStep.Credentials = v1FunctionCredentialsList return v1PipelineStep } func (c *GeneratedRevisionSpecConverter) v1ReadinessCheckToV1ReadinessCheck(source ReadinessCheck) ReadinessCheck { diff --git a/apis/apiextensions/v1/zz_generated.deepcopy.go b/apis/apiextensions/v1/zz_generated.deepcopy.go index d3595b903..dcfac056f 100644 --- a/apis/apiextensions/v1/zz_generated.deepcopy.go +++ b/apis/apiextensions/v1/zz_generated.deepcopy.go @@ -1122,8 +1122,10 @@ func (in *PipelineStep) DeepCopyInto(out *PipelineStep) { } if in.Credentials != nil { in, out := &in.Credentials, &out.Credentials - *out = new(FunctionCredentials) - (*in).DeepCopyInto(*out) + *out = make([]FunctionCredentials, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_common.go b/apis/apiextensions/v1beta1/zz_generated.composition_common.go index c4a8f7eff..9070d8491 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_common.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_common.go @@ -302,7 +302,8 @@ type PipelineStep struct { Input *runtime.RawExtension `json:"input,omitempty"` // Credentials are optional credentials that the Composition Function needs. - Credentials *FunctionCredentials `json:"credentials,omitempty"` + // +optional + Credentials []FunctionCredentials `json:"credentials,omitempty"` } // A FunctionReference references a Composition Function that may be used in a @@ -315,12 +316,15 @@ type FunctionReference struct { // FunctionCredentials are optional credentials that a Composition Function // needs to run. type FunctionCredentials struct { + // Name of this set of credentials. + Name string `json:"name"` + // Source of the function credentials. // +kubebuilder:validation:Enum=None;Secret Source FunctionCredentialsSource `json:"source"` - // A SecretRef is a reference to a secret key that contains the credentials - // that must be used to connect to the provider. + // A SecretRef is a reference to a secret containing credentials that should + // be supplied to the function. 
// +optional SecretRef *xpv1.SecretReference `json:"secretRef,omitempty"` } diff --git a/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index 4df4dd964..522fdc853 100644 --- a/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -741,8 +741,10 @@ func (in *PipelineStep) DeepCopyInto(out *PipelineStep) { } if in.Credentials != nil { in, out := &in.Credentials, &out.Credentials - *out = new(FunctionCredentials) - (*in).DeepCopyInto(*out) + *out = make([]FunctionCredentials, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index 9477187dc..f732366db 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -994,31 +994,40 @@ spec: credentials: description: Credentials are optional credentials that the Composition Function needs. - properties: - secretRef: - description: |- - A SecretRef is a reference to a secret key that contains the credentials - that must be used to connect to the provider. - properties: - name: - description: Name of the secret. - type: string - namespace: - description: Namespace of the secret. - type: string - required: - - name - - namespace - type: object - source: - description: Source of the function credentials. - enum: - - None - - Secret - type: string - required: - - source - type: object + items: + description: |- + FunctionCredentials are optional credentials that a Composition Function + needs to run. + properties: + name: + description: Name of this set of credentials. + type: string + secretRef: + description: |- + A SecretRef is a reference to a secret containing credentials that should + be supplied to the function. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + source: + description: Source of the function credentials. + enum: + - None + - Secret + type: string + required: + - name + - source + type: object + type: array functionRef: description: |- FunctionRef is a reference to the Composition Function this step should @@ -2608,31 +2617,40 @@ spec: credentials: description: Credentials are optional credentials that the Composition Function needs. - properties: - secretRef: - description: |- - A SecretRef is a reference to a secret key that contains the credentials - that must be used to connect to the provider. - properties: - name: - description: Name of the secret. - type: string - namespace: - description: Namespace of the secret. - type: string - required: - - name - - namespace - type: object - source: - description: Source of the function credentials. - enum: - - None - - Secret - type: string - required: - - source - type: object + items: + description: |- + FunctionCredentials are optional credentials that a Composition Function + needs to run. + properties: + name: + description: Name of this set of credentials. + type: string + secretRef: + description: |- + A SecretRef is a reference to a secret containing credentials that should + be supplied to the function. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + source: + description: Source of the function credentials. + enum: + - None + - Secret + type: string + required: + - name + - source + type: object + type: array functionRef: description: |- FunctionRef is a reference to the Composition Function this step should diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index f2235682c..de7cd052b 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -989,31 +989,40 @@ spec: credentials: description: Credentials are optional credentials that the Composition Function needs. - properties: - secretRef: - description: |- - A SecretRef is a reference to a secret key that contains the credentials - that must be used to connect to the provider. - properties: - name: - description: Name of the secret. - type: string - namespace: - description: Namespace of the secret. - type: string - required: - - name - - namespace - type: object - source: - description: Source of the function credentials. - enum: - - None - - Secret - type: string - required: - - source - type: object + items: + description: |- + FunctionCredentials are optional credentials that a Composition Function + needs to run. + properties: + name: + description: Name of this set of credentials. + type: string + secretRef: + description: |- + A SecretRef is a reference to a secret containing credentials that should + be supplied to the function. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + source: + description: Source of the function credentials. 
+ enum: + - None + - Secret + type: string + required: + - name + - source + type: object + type: array functionRef: description: |- FunctionRef is a reference to the Composition Function this step should diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index 1eae74c83..08875fee3 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -43,6 +43,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/internal/names" ) @@ -68,7 +69,7 @@ const ( errFmtApplyCD = "cannot apply composed resource %q" errFmtFetchCDConnectionDetails = "cannot fetch connection details for composed resource %q (a %s named %s)" errFmtUnmarshalPipelineStepInput = "cannot unmarshal input for Composition pipeline step %q" - errFmtGetCredentialsFromSecret = "cannot get Composition pipeline step %q credentials from Secret" + errFmtGetCredentialsFromSecret = "cannot get Composition pipeline step %q credential %q from Secret" errFmtRunPipelineStep = "cannot run Composition pipeline step %q" errFmtDeleteCD = "cannot delete composed resource %q (a %s named %s)" errFmtUnmarshalDesiredCD = "cannot unmarshal desired composed resource %q from RunFunctionResponse" @@ -302,13 +303,18 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur req.Input = in } - // For now we only support loading credentials from a Kubernetes secret. - if cs := fn.Credentials; cs != nil && cs.SecretRef != nil { + req.Credentials = map[string]*v1beta1.Credentials{} + for _, cs := range fn.Credentials { + // For now we only support loading credentials from secrets. 
+ if cs.Source != v1.FunctionCredentialsSourceSecret || cs.SecretRef == nil { + continue + } + s := &corev1.Secret{} if err := c.client.Get(ctx, client.ObjectKey{Namespace: cs.SecretRef.Namespace, Name: cs.SecretRef.Name}, s); err != nil { - return CompositionResult{}, errors.Wrapf(err, errFmtGetCredentialsFromSecret, fn.Step) + return CompositionResult{}, errors.Wrapf(err, errFmtGetCredentialsFromSecret, fn.Step, cs.Name) } - req.Credentials = &v1beta1.Credentials{ + req.Credentials[cs.Name] = &v1beta1.Credentials{ Source: &v1beta1.Credentials_CredentialData{ CredentialData: &v1beta1.CredentialData{ Data: s.Data, diff --git a/internal/controller/apiextensions/composite/composition_functions_test.go b/internal/controller/apiextensions/composite/composition_functions_test.go index 663803899..0c5d524d0 100644 --- a/internal/controller/apiextensions/composite/composition_functions_test.go +++ b/internal/controller/apiextensions/composite/composition_functions_test.go @@ -174,11 +174,14 @@ func TestFunctionCompose(t *testing.T) { { Step: "run-cool-function", FunctionRef: v1.FunctionReference{Name: "cool-function"}, - Credentials: &v1.FunctionCredentials{ - Source: v1.FunctionCredentialsSourceSecret, - SecretRef: &xpv1.SecretReference{ - Namespace: "default", - Name: "cool-secret", + Credentials: []v1.FunctionCredentials{ + { + Name: "cool-secret", + Source: v1.FunctionCredentialsSourceSecret, + SecretRef: &xpv1.SecretReference{ + Namespace: "default", + Name: "cool-secret", + }, }, }, }, @@ -188,7 +191,7 @@ func TestFunctionCompose(t *testing.T) { }, }, want: want{ - err: errors.Wrapf(errBoom, errFmtGetCredentialsFromSecret, "run-cool-function"), + err: errors.Wrapf(errBoom, errFmtGetCredentialsFromSecret, "run-cool-function", "cool-secret"), }, }, "RunFunctionError": { @@ -558,9 +561,6 @@ func TestFunctionCompose(t *testing.T) { reason: "We should return a valid CompositionResult when a 'pure Function' (i.e. patch-and-transform-less) reconcile succeeds", params: params{ kube: &test.MockClient{ - // MockGet: - // test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{Resource: - // "UncoolComposed"}, "")), // all names are available MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { if s, ok := obj.(*corev1.Secret); ok { s.Data = map[string][]byte{ @@ -665,11 +665,14 @@ func TestFunctionCompose(t *testing.T) { { Step: "run-cool-function", FunctionRef: v1.FunctionReference{Name: "cool-function"}, - Credentials: &v1.FunctionCredentials{ - Source: v1.FunctionCredentialsSourceSecret, - SecretRef: &xpv1.SecretReference{ - Namespace: "default", - Name: "cool-secret", + Credentials: []v1.FunctionCredentials{ + { + Name: "cool-secret", + Source: v1.FunctionCredentialsSourceSecret, + SecretRef: &xpv1.SecretReference{ + Namespace: "default", + Name: "cool-secret", + }, }, }, }, diff --git a/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml b/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml index fd0fdf44c..0a45449ae 100644 --- a/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml +++ b/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml @@ -37,6 +37,7 @@ spec: - severity: SEVERITY_NORMAL message: "I am doing a compose!" 
credentials: + - name: important-secret source: Secret secretRef: namespace: crossplane-system From 8e6087c13e9998139325bc8bf5ffacbad3f73762 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 29 Apr 2024 16:53:45 -0700 Subject: [PATCH 144/370] Add list-map markers for pipeline steps and credentials Signed-off-by: Nic Cope --- apis/apiextensions/v1/composition_common.go | 2 ++ apis/apiextensions/v1/composition_types.go | 2 ++ apis/apiextensions/v1/composition_validation_test.go | 1 - .../v1beta1/zz_generated.composition_common.go | 2 ++ .../apiextensions.crossplane.io_compositionrevisions.yaml | 6 ++++++ cluster/crds/apiextensions.crossplane.io_compositions.yaml | 6 ++++++ 6 files changed, 18 insertions(+), 1 deletion(-) diff --git a/apis/apiextensions/v1/composition_common.go b/apis/apiextensions/v1/composition_common.go index 27fa223f1..4819187e8 100644 --- a/apis/apiextensions/v1/composition_common.go +++ b/apis/apiextensions/v1/composition_common.go @@ -301,6 +301,8 @@ type PipelineStep struct { // Credentials are optional credentials that the Composition Function needs. // +optional + // +listType=map + // +listMapKey=name Credentials []FunctionCredentials `json:"credentials,omitempty"` } diff --git a/apis/apiextensions/v1/composition_types.go b/apis/apiextensions/v1/composition_types.go index bbc0a66ae..395dce165 100644 --- a/apis/apiextensions/v1/composition_types.go +++ b/apis/apiextensions/v1/composition_types.go @@ -79,6 +79,8 @@ type CompositionSpec struct { // THIS IS A BETA FIELD. It is not honored if the relevant Crossplane // feature flag is disabled. // +optional + // +listType=map + // +listMapKey=step Pipeline []PipelineStep `json:"pipeline,omitempty"` // WriteConnectionSecretsToNamespace specifies the namespace in which the diff --git a/apis/apiextensions/v1/composition_validation_test.go b/apis/apiextensions/v1/composition_validation_test.go index 787ef6a21..ca873dc2a 100644 --- a/apis/apiextensions/v1/composition_validation_test.go +++ b/apis/apiextensions/v1/composition_validation_test.go @@ -542,7 +542,6 @@ func TestCompositionValidatePipeline(t *testing.T) { }, }, }, - "InvalidMissingSecretRef": { reason: "A step's credential must specify a secretRef if its source is a secret", args: args{ diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_common.go b/apis/apiextensions/v1beta1/zz_generated.composition_common.go index 9070d8491..9fb73ad3f 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_common.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_common.go @@ -303,6 +303,8 @@ type PipelineStep struct { // Credentials are optional credentials that the Composition Function needs. 
// +optional + // +listType=map + // +listMapKey=name Credentials []FunctionCredentials `json:"credentials,omitempty"` } diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index f732366db..e8a17232d 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -1028,6 +1028,9 @@ spec: - source type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map functionRef: description: |- FunctionRef is a reference to the Composition Function this step should @@ -2651,6 +2654,9 @@ spec: - source type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map functionRef: description: |- FunctionRef is a reference to the Composition Function this step should diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index de7cd052b..e8a037e9d 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -1023,6 +1023,9 @@ spec: - source type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map functionRef: description: |- FunctionRef is a reference to the Composition Function this step should @@ -1050,6 +1053,9 @@ spec: - step type: object type: array + x-kubernetes-list-map-keys: + - step + x-kubernetes-list-type: map publishConnectionDetailsWithStoreConfigRef: default: name: default From f799a3a1f9668cdb31ab3d488f62d65559fd3041 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Tue, 30 Apr 2024 11:10:13 +0100 Subject: [PATCH 145/370] chore: remove unused csaManagers from ManagedFieldsUpgrader Signed-off-by: Philippe Scorsolini --- internal/controller/apiextensions/claim/reconciler.go | 4 ++-- internal/controller/apiextensions/claim/syncer_ssa.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/controller/apiextensions/claim/reconciler.go b/internal/controller/apiextensions/claim/reconciler.go index d8836140c..061e74661 100644 --- a/internal/controller/apiextensions/claim/reconciler.go +++ b/internal/controller/apiextensions/claim/reconciler.go @@ -86,7 +86,7 @@ func ControllerName(name string) string { // managed using client-side apply, but should now be managed using server-side // apply. See https://github.com/kubernetes/kubernetes/issues/99003 for details. type ManagedFieldsUpgrader interface { - Upgrade(ctx context.Context, obj client.Object, ssaManager string, csaManagers ...string) error + Upgrade(ctx context.Context, obj client.Object, ssaManager string) error } // A CompositeSyncer binds and syncs the supplied claim with the supplied @@ -390,7 +390,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // to upgrade field managers if _this controller_ might have applied the XR // before using the default client-side apply field manager "crossplane", // but now wants to use server-side apply instead. 
- if err := r.managedFields.Upgrade(ctx, xr, FieldOwnerXR, "crossplane"); err != nil { + if err := r.managedFields.Upgrade(ctx, xr, FieldOwnerXR); err != nil { if kerrors.IsConflict(err) { return reconcile.Result{Requeue: true}, nil } diff --git a/internal/controller/apiextensions/claim/syncer_ssa.go b/internal/controller/apiextensions/claim/syncer_ssa.go index 25878c0af..47654402e 100644 --- a/internal/controller/apiextensions/claim/syncer_ssa.go +++ b/internal/controller/apiextensions/claim/syncer_ssa.go @@ -52,7 +52,7 @@ const ( type NopManagedFieldsUpgrader struct{} // Upgrade does nothing. -func (u *NopManagedFieldsUpgrader) Upgrade(_ context.Context, _ client.Object, _ string, _ ...string) error { +func (u *NopManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client.Object, ssaManager string) error { return nil } @@ -94,7 +94,7 @@ func NewPatchingManagedFieldsUpgrader(w client.Writer) *PatchingManagedFieldsUpg // // Step 5: Eventually the XR reconciler updates a field (e.g. spec.resourceRefs) // and becomes owner of that field. -func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client.Object, ssaManager string, _ ...string) error { +func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client.Object, ssaManager string) error { // The XR doesn't exist, nothing to upgrade. if !meta.WasCreated(obj) { return nil From 6534bffbca192612c4cb0604776d0f17fadd595a Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Tue, 30 Apr 2024 12:28:10 +0100 Subject: [PATCH 146/370] chore: linting Signed-off-by: Philippe Scorsolini --- internal/controller/apiextensions/claim/syncer_ssa.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/controller/apiextensions/claim/syncer_ssa.go b/internal/controller/apiextensions/claim/syncer_ssa.go index 47654402e..2a36ea6cb 100644 --- a/internal/controller/apiextensions/claim/syncer_ssa.go +++ b/internal/controller/apiextensions/claim/syncer_ssa.go @@ -52,7 +52,7 @@ const ( type NopManagedFieldsUpgrader struct{} // Upgrade does nothing. 
-func (u *NopManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client.Object, ssaManager string) error { +func (u *NopManagedFieldsUpgrader) Upgrade(_ context.Context, _ client.Object, _ string) error { return nil } From abc3b23977e047c8a1539f601badf63b12331f18 Mon Sep 17 00:00:00 2001 From: Sunil Shivanand Date: Fri, 15 Mar 2024 20:03:04 +0100 Subject: [PATCH 147/370] Migrate from MergeOptions to ToFieldPath Signed-off-by: Sunil Shivanand --- .../convert/pipelinecomposition/converter.go | 112 +++++++++++++++++- .../pipelinecomposition/converter_test.go | 96 ++++++++------- 2 files changed, 162 insertions(+), 46 deletions(-) diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter.go b/cmd/crank/beta/convert/pipelinecomposition/converter.go index a047de5a5..abdfd6c78 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter.go @@ -147,8 +147,8 @@ func processFunctionInput(input *Input) *runtime.RawExtension { "apiVersion": "pt.fn.crossplane.io/v1beta1", "kind": "Resources", "environment": processedInput.Environment.DeepCopy(), - "patchSets": processedInput.PatchSets, - "resources": processedInput.Resources, + "patchSets": MigratePatchPolicy(processedInput.PatchSets), + "resources": MigratePatchPolicyInResources(processedInput.Resources), } return &runtime.RawExtension{ @@ -156,6 +156,114 @@ func processFunctionInput(input *Input) *runtime.RawExtension { } } +// ComposedTemplate composed template. +type ComposedTemplate struct { + v1.ComposedTemplate + + Patches []Patch `json:"patches,omitempty"` +} + +// MigratePatchPolicyInResources patches resources. +func MigratePatchPolicyInResources(resources []v1.ComposedTemplate) []ComposedTemplate { + composedTemplates := []ComposedTemplate{} + + for _, resource := range resources { + composedTemplate := ComposedTemplate{} + composedTemplate.ComposedTemplate = resource + + for _, patch := range resource.Patches { + newpatch := Patch{} + newpatch.Patch = patch + + if patch.Policy != nil && patch.Policy.MergeOptions != nil { + newpatch.Policy = patchPolicy(patch.Policy) + newpatch.Patch.Policy = nil + } + composedTemplate.Patches = append(composedTemplate.Patches, newpatch) + } + composedTemplate.ComposedTemplate.Patches = nil + composedTemplates = append(composedTemplates, composedTemplate) + } + return composedTemplates +} + +// MigratePatchPolicy migrates mergeoptions. 
+func MigratePatchPolicy(patchset []v1.PatchSet) []NewPatchSet { + newPatchSets := []NewPatchSet{} + + for _, patchSet := range patchset { + newpatchset := NewPatchSet{} + newpatchset.Name = patchSet.Name + + for _, patch := range patchSet.Patches { + newpatch := Patch{} + newpatch.Patch = patch + + if patch.Policy != nil && patch.Policy.MergeOptions != nil { + newpatch.Policy = patchPolicy(patch.Policy) + newpatch.Patch.Policy = nil + } + + newpatchset.Patch = append(newpatchset.Patch, newpatch) + } + + newPatchSets = append(newPatchSets, newpatchset) + } + + return newPatchSets +} + +func patchPolicy(policy *v1.PatchPolicy) *PatchPolicy { + if policy == nil { + return nil + } + mergeOptions := policy.MergeOptions + toFieldPath := ptr.To(ToFieldPathPolicyReplace) + if mergeOptions != nil { + if mergeOptions.KeepMapValues != nil && *mergeOptions.KeepMapValues { + toFieldPath = ptr.To(ToFieldPathPolicyMerge) + } + if mergeOptions.AppendSlice != nil && *mergeOptions.AppendSlice { + toFieldPath = ptr.To(ToFieldPathPolicyAppendArray) + } + } + return &PatchPolicy{ + FromFieldPath: policy.FromFieldPath, + ToFieldPath: toFieldPath, + } +} + +// NewPatchSet test. +type NewPatchSet struct { + // Name of this PatchSet. + Name string `json:"name"` + + Patch []Patch `json:"patches"` +} + +// Patch patch. +type Patch struct { + v1.Patch + + Policy *PatchPolicy `json:"policy,omitempty"` +} + +// A ToFieldPathPolicy determines how to patch to a field path. +type ToFieldPathPolicy string + +// ToFieldPathPatchPolicy defines the policy for the ToFieldPath in a Patch. +const ( + ToFieldPathPolicyReplace ToFieldPathPolicy = "Replace" + ToFieldPathPolicyMerge ToFieldPathPolicy = "Merge" + ToFieldPathPolicyAppendArray ToFieldPathPolicy = "AppendArray" +) + +// PatchPolicy defines patch policy. 
+type PatchPolicy struct { + FromFieldPath *v1.FromFieldPathPolicy `json:"fromFieldPath,omitempty"` + ToFieldPath *ToFieldPathPolicy `json:"toFieldPath,omitempty"` +} + func setMissingPatchSetFields(patchSet v1.PatchSet) v1.PatchSet { p := []v1.Patch{} for _, patch := range patchSet.Patches { diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go index 4ebdae0f2..bd2eb4dfb 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go @@ -291,40 +291,44 @@ func TestConvertPnTToPipeline(t *testing.T) { }, }, }, - "patchSets": []v1.PatchSet{ + "patchSets": []NewPatchSet{ { Name: "test-patchset", - Patches: []v1.Patch{ + Patch: []Patch{ { - Type: v1.PatchTypeFromCompositeFieldPath, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, - Transforms: []v1.Transform{ - { - Type: v1.TransformTypeString, - String: &v1.StringTransform{ - Format: &stringFmt, - Type: v1.StringTransformTypeFormat, + Patch: v1.Patch{ + Type: v1.PatchTypeFromCompositeFieldPath, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + Transforms: []v1.Transform{ + { + Type: v1.TransformTypeString, + String: &v1.StringTransform{ + Format: &stringFmt, + Type: v1.StringTransformTypeFormat, + }, }, - }, - { - Type: v1.TransformTypeMath, - Math: &v1.MathTransform{ - Multiply: &intp, - Type: v1.MathTransformTypeMultiply, + { + Type: v1.TransformTypeMath, + Math: &v1.MathTransform{ + Multiply: &intp, + Type: v1.MathTransformTypeMultiply, + }, }, }, }, }, { - Type: v1.PatchTypeCombineFromComposite, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, + Patch: v1.Patch{ + Type: v1.PatchTypeCombineFromComposite, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + }, }, }, }, }, - "resources": []v1.ComposedTemplate{}, + "resources": []ComposedTemplate{}, }, }, }, @@ -494,8 +498,8 @@ func TestProcessFunctionInput(t *testing.T) { "apiVersion": "pt.fn.crossplane.io/v1beta1", "kind": "Resources", "environment": (*v1.EnvironmentConfiguration)(nil), - "patchSets": []v1.PatchSet{}, - "resources": []v1.ComposedTemplate{}, + "patchSets": []NewPatchSet{}, + "resources": []ComposedTemplate{}, }, }, }, @@ -559,40 +563,44 @@ func TestProcessFunctionInput(t *testing.T) { }, }, }, - "patchSets": []v1.PatchSet{ + "patchSets": []NewPatchSet{ { Name: "test-patchset", - Patches: []v1.Patch{ + Patch: []Patch{ { - Type: v1.PatchTypeFromCompositeFieldPath, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, - Transforms: []v1.Transform{ - { - Type: v1.TransformTypeString, - String: &v1.StringTransform{ - Format: &stringFmt, - Type: v1.StringTransformTypeFormat, + Patch: v1.Patch{ + Type: v1.PatchTypeFromCompositeFieldPath, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + Transforms: []v1.Transform{ + { + Type: v1.TransformTypeString, + String: &v1.StringTransform{ + Format: &stringFmt, + Type: v1.StringTransformTypeFormat, + }, }, - }, - { - Type: v1.TransformTypeMath, - Math: &v1.MathTransform{ - Multiply: &intp, - Type: v1.MathTransformTypeMultiply, + { + Type: v1.TransformTypeMath, + Math: &v1.MathTransform{ + Multiply: &intp, + Type: v1.MathTransformTypeMultiply, + }, }, }, }, }, { - Type: v1.PatchTypeCombineFromComposite, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, + Patch: v1.Patch{ + Type: v1.PatchTypeCombineFromComposite, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + }, }, }, }, }, - "resources": []v1.ComposedTemplate{}, + "resources": 
[]ComposedTemplate{}, }, }, }, From 85ef0a8a1040ade59c024dcf7f5059d666ed1f1a Mon Sep 17 00:00:00 2001 From: Sunil Shivanand Date: Thu, 25 Apr 2024 10:38:14 +0200 Subject: [PATCH 148/370] Add unit tests and address review comments Signed-off-by: Sunil Shivanand --- .../convert/pipelinecomposition/converter.go | 105 +++++++----------- .../pipelinecomposition/converter_test.go | 76 ++++++++++++- .../beta/convert/pipelinecomposition/types.go | 28 +++++ 3 files changed, 141 insertions(+), 68 deletions(-) diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter.go b/cmd/crank/beta/convert/pipelinecomposition/converter.go index abdfd6c78..02d48a8b8 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter.go @@ -156,13 +156,6 @@ func processFunctionInput(input *Input) *runtime.RawExtension { } } -// ComposedTemplate composed template. -type ComposedTemplate struct { - v1.ComposedTemplate - - Patches []Patch `json:"patches,omitempty"` -} - // MigratePatchPolicyInResources patches resources. func MigratePatchPolicyInResources(resources []v1.ComposedTemplate) []ComposedTemplate { composedTemplates := []ComposedTemplate{} @@ -170,17 +163,8 @@ func MigratePatchPolicyInResources(resources []v1.ComposedTemplate) []ComposedTe for _, resource := range resources { composedTemplate := ComposedTemplate{} composedTemplate.ComposedTemplate = resource + composedTemplate.Patches = patchPatches(resource.Patches) - for _, patch := range resource.Patches { - newpatch := Patch{} - newpatch.Patch = patch - - if patch.Policy != nil && patch.Policy.MergeOptions != nil { - newpatch.Policy = patchPolicy(patch.Policy) - newpatch.Patch.Policy = nil - } - composedTemplate.Patches = append(composedTemplate.Patches, newpatch) - } composedTemplate.ComposedTemplate.Patches = nil composedTemplates = append(composedTemplates, composedTemplate) } @@ -188,64 +172,59 @@ func MigratePatchPolicyInResources(resources []v1.ComposedTemplate) []ComposedTe } // MigratePatchPolicy migrates mergeoptions. 
-func MigratePatchPolicy(patchset []v1.PatchSet) []NewPatchSet { - newPatchSets := []NewPatchSet{} +func MigratePatchPolicy(patchset []v1.PatchSet) []PatchSet { + newPatchSets := []PatchSet{} for _, patchSet := range patchset { - newpatchset := NewPatchSet{} + newpatchset := PatchSet{} newpatchset.Name = patchSet.Name + newpatchset.Patch = patchPatches(patchSet.Patches) - for _, patch := range patchSet.Patches { - newpatch := Patch{} - newpatch.Patch = patch + newPatchSets = append(newPatchSets, newpatchset) + } - if patch.Policy != nil && patch.Policy.MergeOptions != nil { - newpatch.Policy = patchPolicy(patch.Policy) - newpatch.Patch.Policy = nil - } + return newPatchSets +} + +func patchPatches(patches []v1.Patch) []Patch { + newPatches := []Patch{} + + for _, patch := range patches { + newpatch := Patch{} + newpatch.Patch = patch - newpatchset.Patch = append(newpatchset.Patch, newpatch) + if patch.Policy != nil && patch.Policy.MergeOptions != nil { + newpatch.Policy = patchPolicy(patch.Policy) + newpatch.Patch.Policy = nil } - newPatchSets = append(newPatchSets, newpatchset) + newPatches = append(newPatches, newpatch) } - return newPatchSets + return newPatches } func patchPolicy(policy *v1.PatchPolicy) *PatchPolicy { - if policy == nil { + if policy.FromFieldPath == nil { return nil } - mergeOptions := policy.MergeOptions - toFieldPath := ptr.To(ToFieldPathPolicyReplace) - if mergeOptions != nil { - if mergeOptions.KeepMapValues != nil && *mergeOptions.KeepMapValues { - toFieldPath = ptr.To(ToFieldPathPolicyMerge) - } - if mergeOptions.AppendSlice != nil && *mergeOptions.AppendSlice { - toFieldPath = ptr.To(ToFieldPathPolicyAppendArray) - } - } - return &PatchPolicy{ + + pp := &PatchPolicy{ FromFieldPath: policy.FromFieldPath, - ToFieldPath: toFieldPath, } -} -// NewPatchSet test. -type NewPatchSet struct { - // Name of this PatchSet. - Name string `json:"name"` - - Patch []Patch `json:"patches"` -} - -// Patch patch. -type Patch struct { - v1.Patch + mo := policy.MergeOptions + if mo.KeepMapValues == nil && mo.AppendSlice == nil { + pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjects) + } else if mo.AppendSlice == nil { + pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjects) + } else if mo.KeepMapValues == nil { + pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays) + } else { + pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjectsAppendArrays) + } - Policy *PatchPolicy `json:"policy,omitempty"` + return pp } // A ToFieldPathPolicy determines how to patch to a field path. @@ -253,17 +232,13 @@ type ToFieldPathPolicy string // ToFieldPathPatchPolicy defines the policy for the ToFieldPath in a Patch. const ( - ToFieldPathPolicyReplace ToFieldPathPolicy = "Replace" - ToFieldPathPolicyMerge ToFieldPathPolicy = "Merge" - ToFieldPathPolicyAppendArray ToFieldPathPolicy = "AppendArray" + ToFieldPathPolicyReplace ToFieldPathPolicy = "Replace" + ToFieldPathPolicyMergeObjects ToFieldPathPolicy = "MergeObjects" + ToFieldPathPolicyMergeObjectsAppendArrays ToFieldPathPolicy = "MergeObjectsAppendArrays" + ToFieldPathPolicyForceMergeObjects ToFieldPathPolicy = "ForceMergeObjects" + ToFieldPathPolicyForceMergeObjectsAppendArrays ToFieldPathPolicy = "ForceMergeObjectsAppendArrays" ) -// PatchPolicy defines patch policy. 
-type PatchPolicy struct { - FromFieldPath *v1.FromFieldPathPolicy `json:"fromFieldPath,omitempty"` - ToFieldPath *ToFieldPathPolicy `json:"toFieldPath,omitempty"` -} - func setMissingPatchSetFields(patchSet v1.PatchSet) v1.PatchSet { p := []v1.Patch{} for _, patch := range patchSet.Patches { diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go index bd2eb4dfb..5bd03b361 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" commonv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/test" @@ -291,7 +292,7 @@ func TestConvertPnTToPipeline(t *testing.T) { }, }, }, - "patchSets": []NewPatchSet{ + "patchSets": []PatchSet{ { Name: "test-patchset", Patch: []Patch{ @@ -498,7 +499,7 @@ func TestProcessFunctionInput(t *testing.T) { "apiVersion": "pt.fn.crossplane.io/v1beta1", "kind": "Resources", "environment": (*v1.EnvironmentConfiguration)(nil), - "patchSets": []NewPatchSet{}, + "patchSets": []PatchSet{}, "resources": []ComposedTemplate{}, }, }, @@ -563,7 +564,7 @@ func TestProcessFunctionInput(t *testing.T) { }, }, }, - "patchSets": []NewPatchSet{ + "patchSets": []PatchSet{ { Name: "test-patchset", Patch: []Patch{ @@ -989,3 +990,72 @@ func TestSetMissingResourceFields(t *testing.T) { }) } } + +func TestPatchPolicy(t *testing.T) { + cases := map[string]struct { + reason string + args *v1.PatchPolicy + want *PatchPolicy + }{ + "PatchPolicyWithEmptyMergeOptions": { + reason: "MergeOptions is empty", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{}, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), + }, + }, + "PatchPolicyWithKeepMapValuesTrue": { + reason: "KeepMapValues is true", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), + }, + }, + "PatchPolicyWithKeepMapValuesTrueAppendSliceTrue": { + reason: "KeepMapValues and AppendSlice is true", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyRequired), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + AppendSlice: ptr.To(true), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyRequired), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjectsAppendArrays), + }, + }, + "PatchPolicyWithAppendSliceTrue": { + reason: "AppendSlice is true", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + AppendSlice: ptr.To(true), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays), + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := patchPolicy(tc.args) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("%s\npatchPolicy(...): -want i, +got i:\n%s", tc.reason, diff) + } + 
}) + } +} diff --git a/cmd/crank/beta/convert/pipelinecomposition/types.go b/cmd/crank/beta/convert/pipelinecomposition/types.go index 001639141..88d9a2415 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/types.go +++ b/cmd/crank/beta/convert/pipelinecomposition/types.go @@ -47,3 +47,31 @@ type Input struct { // +optional Resources []v1.ComposedTemplate `json:"resources,omitempty"` } + +// PatchSet wrapper around v1.PatchSet with custom Patch. +type PatchSet struct { + // Name of this PatchSet. + Name string `json:"name"` + + Patch []Patch `json:"patches"` +} + +// ComposedTemplate wrapper around v1.ComposedTemplate with custom Patch. +type ComposedTemplate struct { + v1.ComposedTemplate + + Patches []Patch `json:"patches,omitempty"` +} + +// Patch wrapper around v1.Patch with custom PatchPolicy. +type Patch struct { + v1.Patch + + Policy *PatchPolicy `json:"policy,omitempty"` +} + +// PatchPolicy defines the policy for a patch. +type PatchPolicy struct { + FromFieldPath *v1.FromFieldPathPolicy `json:"fromFieldPath,omitempty"` + ToFieldPath *ToFieldPathPolicy `json:"toFieldPath,omitempty"` +} From 84b01bbae8974cd6d1265bdb122ad1f4a6effa7b Mon Sep 17 00:00:00 2001 From: Sunil Shivanand Date: Thu, 25 Apr 2024 10:53:26 +0200 Subject: [PATCH 149/370] Rewrite if-else to switch statement Signed-off-by: Sunil Shivanand --- .../beta/convert/pipelinecomposition/converter.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter.go b/cmd/crank/beta/convert/pipelinecomposition/converter.go index 02d48a8b8..34f4bd217 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter.go @@ -214,16 +214,16 @@ func patchPolicy(policy *v1.PatchPolicy) *PatchPolicy { } mo := policy.MergeOptions - if mo.KeepMapValues == nil && mo.AppendSlice == nil { + switch { + case mo.KeepMapValues == nil && mo.AppendSlice == nil: pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjects) - } else if mo.AppendSlice == nil { + case mo.AppendSlice == nil: pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjects) - } else if mo.KeepMapValues == nil { + case mo.KeepMapValues == nil: pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays) - } else { + default: pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjectsAppendArrays) } - return pp } From 1f71367af6ba079da4eff64104e3fcbb8043cbdd Mon Sep 17 00:00:00 2001 From: Sunil Shivanand Date: Sat, 27 Apr 2024 08:02:26 +0200 Subject: [PATCH 150/370] Fix the logic and add more tests Signed-off-by: Sunil Shivanand --- .../convert/pipelinecomposition/converter.go | 56 +++++---- .../pipelinecomposition/converter_test.go | 116 +++++++++++++++--- .../beta/convert/pipelinecomposition/types.go | 12 ++ 3 files changed, 144 insertions(+), 40 deletions(-) diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter.go b/cmd/crank/beta/convert/pipelinecomposition/converter.go index 34f4bd217..8aafde920 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter.go @@ -193,7 +193,7 @@ func patchPatches(patches []v1.Patch) []Patch { newpatch := Patch{} newpatch.Patch = patch - if patch.Policy != nil && patch.Policy.MergeOptions != nil { + if patch.Policy != nil { newpatch.Policy = patchPolicy(patch.Policy) newpatch.Patch.Policy = nil } @@ -205,40 +205,44 @@ func patchPatches(patches []v1.Patch) []Patch { } func patchPolicy(policy *v1.PatchPolicy) *PatchPolicy { - if 
policy.FromFieldPath == nil { - return nil - } - pp := &PatchPolicy{ FromFieldPath: policy.FromFieldPath, } mo := policy.MergeOptions - switch { - case mo.KeepMapValues == nil && mo.AppendSlice == nil: - pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjects) - case mo.AppendSlice == nil: - pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjects) - case mo.KeepMapValues == nil: - pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays) - default: - pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjectsAppendArrays) + if mo == nil { + pp.ToFieldPath = ptr.To(ToFieldPathPolicyReplace) + } else { + switch { + case mo.KeepMapValues == nil && mo.AppendSlice == nil: + pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjects) + case mo.AppendSlice == nil: + if *mo.KeepMapValues { + pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjects) + } else { + pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjects) + } + case mo.KeepMapValues == nil: + if *mo.AppendSlice { + pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays) + } else { + pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjects) + } + case *mo.AppendSlice == *mo.KeepMapValues: + if *mo.AppendSlice { + pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjectsAppendArrays) + } else { + pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjects) + } + case *mo.AppendSlice: + pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays) + case *mo.KeepMapValues: + pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjects) + } } return pp } -// A ToFieldPathPolicy determines how to patch to a field path. -type ToFieldPathPolicy string - -// ToFieldPathPatchPolicy defines the policy for the ToFieldPath in a Patch. -const ( - ToFieldPathPolicyReplace ToFieldPathPolicy = "Replace" - ToFieldPathPolicyMergeObjects ToFieldPathPolicy = "MergeObjects" - ToFieldPathPolicyMergeObjectsAppendArrays ToFieldPathPolicy = "MergeObjectsAppendArrays" - ToFieldPathPolicyForceMergeObjects ToFieldPathPolicy = "ForceMergeObjects" - ToFieldPathPolicyForceMergeObjectsAppendArrays ToFieldPathPolicy = "ForceMergeObjectsAppendArrays" -) - func setMissingPatchSetFields(patchSet v1.PatchSet) v1.PatchSet { p := []v1.Patch{} for _, patch := range patchSet.Patches { diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go index 5bd03b361..8cc29375d 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go @@ -991,25 +991,34 @@ func TestSetMissingResourceFields(t *testing.T) { } } +/* +# MergeOptions appendSlice keepMapValues policy.toFieldPath +1 nil N/A N/A Replace +2 non-nil nil or false true MergeObjects +3 non-nil true nil or false ForceMergeObjectsAppendArrays +4 non-nil nil or false nil or false ForceMergeObjects +5 non-nil true true MergeObjectsAppendArrays +*/ + func TestPatchPolicy(t *testing.T) { cases := map[string]struct { reason string args *v1.PatchPolicy want *PatchPolicy }{ - "PatchPolicyWithEmptyMergeOptions": { - reason: "MergeOptions is empty", + "PatchPolicyWithNilMergeOptions": { // case 1 + reason: "MergeOptions is nil", args: &v1.PatchPolicy{ FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), - MergeOptions: &commonv1.MergeOptions{}, + MergeOptions: nil, }, want: &PatchPolicy{ FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), - ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), + ToFieldPath: ptr.To(ToFieldPathPolicyReplace), }, }, - 
"PatchPolicyWithKeepMapValuesTrue": { - reason: "KeepMapValues is true", + "PatchPolicyWithKeepMapValuesTrueAppendSliceNil": { + reason: "AppendSlice is nil && KeepMapValues is true", // case 2 args: &v1.PatchPolicy{ FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), MergeOptions: &commonv1.MergeOptions{ @@ -1021,22 +1030,22 @@ func TestPatchPolicy(t *testing.T) { ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), }, }, - "PatchPolicyWithKeepMapValuesTrueAppendSliceTrue": { - reason: "KeepMapValues and AppendSlice is true", + "PatchPolicyWithKeepMapValuesTrueAppendSliceFalse": { + reason: "AppendSlice is false && KeepMapValues is true", // case 2 args: &v1.PatchPolicy{ - FromFieldPath: ptr.To(v1.FromFieldPathPolicyRequired), + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), MergeOptions: &commonv1.MergeOptions{ KeepMapValues: ptr.To(true), - AppendSlice: ptr.To(true), + AppendSlice: ptr.To(false), }, }, want: &PatchPolicy{ - FromFieldPath: ptr.To(v1.FromFieldPathPolicyRequired), - ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjectsAppendArrays), + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), }, }, - "PatchPolicyWithAppendSliceTrue": { - reason: "AppendSlice is true", + "PatchPolicyWithTrueAppendSliceInMergeOptions": { // case 3 + reason: "AppendSlice is true && KeepMapValues is nil", args: &v1.PatchPolicy{ FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), MergeOptions: &commonv1.MergeOptions{ @@ -1048,6 +1057,85 @@ func TestPatchPolicy(t *testing.T) { ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays), }, }, + "PatchPolicyWithTrueAppendSliceFalseKeepMapValuesInMergeOptions": { // case 3 + reason: "AppendSlice is true && KeepMapValues is false", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + AppendSlice: ptr.To(true), + KeepMapValues: ptr.To(false), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays), + }, + }, + "PatchPolicyWithEmptyMergeOptions": { // case 4 + reason: "Both AppendSlice and KeepMapValues are nil", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{}, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), + }, + }, + "PatchPolicyWithNilKeepMapValuesInMergeOptions": { // case 4 + reason: "AppendSlice is false and KeepMapValues is nil", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + AppendSlice: ptr.To(false), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), + }, + }, + "PatchPolicyWithNilAppendSliceInMergeOptions": { + reason: "AppendSlice is nil and KeepMapValues is false", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(false), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), + }, + }, + "PatchPolicyWithBothKeepMapValuesAndAppendSliceFalse": { + reason: "Both KeepMapValues and AppendSlice is false", + args: &v1.PatchPolicy{ + FromFieldPath: 
ptr.To(v1.FromFieldPathPolicyRequired), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(false), + AppendSlice: ptr.To(false), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyRequired), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), + }, + }, + "PatchPolicyWithKeepMapValuesTrueAppendSliceTrue": { // case 5 + reason: "Both KeepMapValues and AppendSlice is true", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyRequired), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + AppendSlice: ptr.To(true), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyRequired), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjectsAppendArrays), + }, + }, } for name, tc := range cases { diff --git a/cmd/crank/beta/convert/pipelinecomposition/types.go b/cmd/crank/beta/convert/pipelinecomposition/types.go index 88d9a2415..3a4a97708 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/types.go +++ b/cmd/crank/beta/convert/pipelinecomposition/types.go @@ -70,6 +70,18 @@ type Patch struct { Policy *PatchPolicy `json:"policy,omitempty"` } +// A ToFieldPathPolicy determines how to patch to a field path. +type ToFieldPathPolicy string + +// ToFieldPathPatchPolicy defines the policy for the ToFieldPath in a Patch. +const ( + ToFieldPathPolicyReplace ToFieldPathPolicy = "Replace" + ToFieldPathPolicyMergeObjects ToFieldPathPolicy = "MergeObjects" + ToFieldPathPolicyMergeObjectsAppendArrays ToFieldPathPolicy = "MergeObjectsAppendArrays" + ToFieldPathPolicyForceMergeObjects ToFieldPathPolicy = "ForceMergeObjects" + ToFieldPathPolicyForceMergeObjectsAppendArrays ToFieldPathPolicy = "ForceMergeObjectsAppendArrays" +) + // PatchPolicy defines the policy for a patch. 
type PatchPolicy struct { FromFieldPath *v1.FromFieldPathPolicy `json:"fromFieldPath,omitempty"` From e02ec2e8d84432469cc7299112d40eac6496b942 Mon Sep 17 00:00:00 2001 From: Sunil Shivanand Date: Sat, 27 Apr 2024 09:00:29 +0200 Subject: [PATCH 151/370] Add another test to verify nil FromFieldPath Signed-off-by: Sunil Shivanand --- .../convert/pipelinecomposition/converter_test.go | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go index 8cc29375d..733567682 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go @@ -1017,6 +1017,17 @@ func TestPatchPolicy(t *testing.T) { ToFieldPath: ptr.To(ToFieldPathPolicyReplace), }, }, + "PatchPolicyWithNilFromFieldPath": { // case 1 + reason: "MergeOptions is nil", + args: &v1.PatchPolicy{ + FromFieldPath: nil, + MergeOptions: nil, + }, + want: &PatchPolicy{ + FromFieldPath: nil, + ToFieldPath: ptr.To(ToFieldPathPolicyReplace), + }, + }, "PatchPolicyWithKeepMapValuesTrueAppendSliceNil": { reason: "AppendSlice is nil && KeepMapValues is true", // case 2 args: &v1.PatchPolicy{ @@ -1111,14 +1122,14 @@ func TestPatchPolicy(t *testing.T) { "PatchPolicyWithBothKeepMapValuesAndAppendSliceFalse": { reason: "Both KeepMapValues and AppendSlice is false", args: &v1.PatchPolicy{ - FromFieldPath: ptr.To(v1.FromFieldPathPolicyRequired), + FromFieldPath: nil, MergeOptions: &commonv1.MergeOptions{ KeepMapValues: ptr.To(false), AppendSlice: ptr.To(false), }, }, want: &PatchPolicy{ - FromFieldPath: ptr.To(v1.FromFieldPathPolicyRequired), + FromFieldPath: nil, ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), }, }, From 141112250a7511a2fc414a6b21ccb5fa9a4bf4b9 Mon Sep 17 00:00:00 2001 From: Sunil Shivanand Date: Sat, 27 Apr 2024 14:39:18 +0200 Subject: [PATCH 152/370] Keep the dupword linter happy Signed-off-by: Sunil Shivanand --- .../beta/convert/pipelinecomposition/converter_test.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go index 733567682..c87748206 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go @@ -992,12 +992,14 @@ func TestSetMissingResourceFields(t *testing.T) { } /* -# MergeOptions appendSlice keepMapValues policy.toFieldPath -1 nil N/A N/A Replace +# MergeOptions appendSlice keepMapValues policy.toFieldPath +1 nil N/A N/A* Replace 2 non-nil nil or false true MergeObjects 3 non-nil true nil or false ForceMergeObjectsAppendArrays 4 non-nil nil or false nil or false ForceMergeObjects -5 non-nil true true MergeObjectsAppendArrays +5 non-nil true true* MergeObjectsAppendArrays + + * keeps dupword linter happy */ func TestPatchPolicy(t *testing.T) { From 17f22b4627ddbf6c4039d6fe6a9281c3c4b709f3 Mon Sep 17 00:00:00 2001 From: Sunil Shivanand Date: Sat, 27 Apr 2024 20:52:53 +0200 Subject: [PATCH 153/370] Rejig the code to keep the logic simple to follow Signed-off-by: Sunil Shivanand --- .../convert/pipelinecomposition/converter.go | 71 +++++++++++-------- .../pipelinecomposition/converter_test.go | 37 +++++----- 2 files changed, 57 insertions(+), 51 deletions(-) diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter.go 
b/cmd/crank/beta/convert/pipelinecomposition/converter.go index 8aafde920..75221eecf 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter.go @@ -27,6 +27,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" + commonv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" ) @@ -205,42 +207,51 @@ func patchPatches(patches []v1.Patch) []Patch { } func patchPolicy(policy *v1.PatchPolicy) *PatchPolicy { - pp := &PatchPolicy{ + to := getToFieldPathPolicy(policy.MergeOptions) + + if to == nil && policy.FromFieldPath == nil { + // neither To nor From has been set, just return nil to use defaults for + // everything + return nil + } + + return &PatchPolicy{ FromFieldPath: policy.FromFieldPath, + ToFieldPath: to, } +} - mo := policy.MergeOptions +func getToFieldPathPolicy(mo *commonv1.MergeOptions) *ToFieldPathPolicy { if mo == nil { - pp.ToFieldPath = ptr.To(ToFieldPathPolicyReplace) - } else { - switch { - case mo.KeepMapValues == nil && mo.AppendSlice == nil: - pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjects) - case mo.AppendSlice == nil: - if *mo.KeepMapValues { - pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjects) - } else { - pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjects) - } - case mo.KeepMapValues == nil: - if *mo.AppendSlice { - pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays) - } else { - pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjects) - } - case *mo.AppendSlice == *mo.KeepMapValues: - if *mo.AppendSlice { - pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjectsAppendArrays) - } else { - pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjects) - } - case *mo.AppendSlice: - pp.ToFieldPath = ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays) - case *mo.KeepMapValues: - pp.ToFieldPath = ptr.To(ToFieldPathPolicyMergeObjects) + // No merge options at all, default to nil which will mean Replace + return nil + } + + if isTrue(mo.KeepMapValues) { + if isNilOrFalse(mo.AppendSlice) { + // { appendSlice: nil/false, keepMapValues: true} + return ptr.To(ToFieldPathPolicyMergeObjects) } + + // { appendSlice: true, keepMapValues: true } + return ptr.To(ToFieldPathPolicyMergeObjectsAppendArrays) } - return pp + + if isTrue(mo.AppendSlice) { + // { appendSlice: true, keepMapValues: nil/false } + return ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays) + } + + // { appendSlice: nil/false, keepMapValues: nil/false } + return ptr.To(ToFieldPathPolicyForceMergeObjects) +} + +func isNilOrFalse(b *bool) bool { + return b == nil || !*b +} + +func isTrue(b *bool) bool { + return b != nil && *b } func setMissingPatchSetFields(patchSet v1.PatchSet) v1.PatchSet { diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go index c87748206..9884436cb 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go @@ -992,14 +992,12 @@ func TestSetMissingResourceFields(t *testing.T) { } /* -# MergeOptions appendSlice keepMapValues policy.toFieldPath -1 nil N/A N/A* Replace -2 non-nil nil or false true MergeObjects -3 non-nil true nil or false ForceMergeObjectsAppendArrays -4 non-nil nil or false nil or false ForceMergeObjects -5 non-nil true true* MergeObjectsAppendArrays - - * keeps dupword linter happy +# MergeOptions appendSlice keepMapValues 
policy.toFieldPath +1 nil N/A N/A nil (defaults to Replace) +2 non-nil nil or false true MergeObjects +3 non-nil true nil or false ForceMergeObjectsAppendArrays +4 non-nil nil or false nil or false ForceMergeObjects +5 non-nil true true MergeObjectsAppendArrays */ func TestPatchPolicy(t *testing.T) { @@ -1008,26 +1006,23 @@ func TestPatchPolicy(t *testing.T) { args *v1.PatchPolicy want *PatchPolicy }{ - "PatchPolicyWithNilMergeOptions": { // case 1 - reason: "MergeOptions is nil", + "PatchPolicyWithNilMergeOptionsAndFromFieldPath": { // case 1 + reason: "MergeOptions and FromFieldPath are nil", args: &v1.PatchPolicy{ - FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + FromFieldPath: nil, MergeOptions: nil, }, - want: &PatchPolicy{ - FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), - ToFieldPath: ptr.To(ToFieldPathPolicyReplace), - }, + want: nil, }, - "PatchPolicyWithNilFromFieldPath": { // case 1 + "PatchPolicyWithNilMergeOptions": { // case 1 reason: "MergeOptions is nil", args: &v1.PatchPolicy{ - FromFieldPath: nil, + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), MergeOptions: nil, }, want: &PatchPolicy{ - FromFieldPath: nil, - ToFieldPath: ptr.To(ToFieldPathPolicyReplace), + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: nil, }, }, "PatchPolicyWithKeepMapValuesTrueAppendSliceNil": { @@ -1108,7 +1103,7 @@ func TestPatchPolicy(t *testing.T) { ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), }, }, - "PatchPolicyWithNilAppendSliceInMergeOptions": { + "PatchPolicyWithNilAppendSliceInMergeOptions": { // case 4 reason: "AppendSlice is nil and KeepMapValues is false", args: &v1.PatchPolicy{ FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), @@ -1121,7 +1116,7 @@ func TestPatchPolicy(t *testing.T) { ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), }, }, - "PatchPolicyWithBothKeepMapValuesAndAppendSliceFalse": { + "PatchPolicyWithBothKeepMapValuesAndAppendSliceFalse": { // case 4 reason: "Both KeepMapValues and AppendSlice is false", args: &v1.PatchPolicy{ FromFieldPath: nil, From c5ff33cafba46004ed8c3c8e1e0a71933e2b57f1 Mon Sep 17 00:00:00 2001 From: Sunil Shivanand Date: Sat, 27 Apr 2024 21:05:38 +0200 Subject: [PATCH 154/370] fix dupword linter error Signed-off-by: Sunil Shivanand --- cmd/crank/beta/convert/pipelinecomposition/converter_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go index 9884436cb..d16b6ac21 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go @@ -993,11 +993,11 @@ func TestSetMissingResourceFields(t *testing.T) { /* # MergeOptions appendSlice keepMapValues policy.toFieldPath -1 nil N/A N/A nil (defaults to Replace) +1 nil N/A n/A nil (defaults to Replace) 2 non-nil nil or false true MergeObjects 3 non-nil true nil or false ForceMergeObjectsAppendArrays 4 non-nil nil or false nil or false ForceMergeObjects -5 non-nil true true MergeObjectsAppendArrays +5 non-nil true True MergeObjectsAppendArrays */ func TestPatchPolicy(t *testing.T) { From db3edceb7f5abff23961b15c6489db040535f182 Mon Sep 17 00:00:00 2001 From: Sunil Shivanand Date: Tue, 30 Apr 2024 10:27:48 +0200 Subject: [PATCH 155/370] Address review comments Signed-off-by: Sunil Shivanand --- .../convert/pipelinecomposition/converter.go | 29 +++++++++++-------- .../pipelinecomposition/converter_test.go | 6 ++-- 
.../beta/convert/pipelinecomposition/types.go | 2 +- 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter.go b/cmd/crank/beta/convert/pipelinecomposition/converter.go index 75221eecf..450314ddf 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter.go @@ -149,7 +149,7 @@ func processFunctionInput(input *Input) *runtime.RawExtension { "apiVersion": "pt.fn.crossplane.io/v1beta1", "kind": "Resources", "environment": processedInput.Environment.DeepCopy(), - "patchSets": MigratePatchPolicy(processedInput.PatchSets), + "patchSets": MigratePatchPolicyInPatchSets(processedInput.PatchSets), "resources": MigratePatchPolicyInResources(processedInput.Resources), } @@ -158,29 +158,31 @@ func processFunctionInput(input *Input) *runtime.RawExtension { } } -// MigratePatchPolicyInResources patches resources. +// MigratePatchPolicyInResources processes all the patches in the given resources to migrate their patch policies. func MigratePatchPolicyInResources(resources []v1.ComposedTemplate) []ComposedTemplate { composedTemplates := []ComposedTemplate{} for _, resource := range resources { composedTemplate := ComposedTemplate{} composedTemplate.ComposedTemplate = resource - composedTemplate.Patches = patchPatches(resource.Patches) - + composedTemplate.Patches = migratePatches(resource.Patches) + // Conversion function above overrides the patches in the new type, + // so after conversion we set the underlying patches to nil to make sure + // there's no conflict in the serialized output. composedTemplate.ComposedTemplate.Patches = nil composedTemplates = append(composedTemplates, composedTemplate) } return composedTemplates } -// MigratePatchPolicy migrates mergeoptions. -func MigratePatchPolicy(patchset []v1.PatchSet) []PatchSet { +// MigratePatchPolicyInPatchSets processes all the patches in the given patch set to migrate their patch policies. +func MigratePatchPolicyInPatchSets(patchset []v1.PatchSet) []PatchSet { newPatchSets := []PatchSet{} for _, patchSet := range patchset { newpatchset := PatchSet{} newpatchset.Name = patchSet.Name - newpatchset.Patch = patchPatches(patchSet.Patches) + newpatchset.Patches = migratePatches(patchSet.Patches) newPatchSets = append(newPatchSets, newpatchset) } @@ -188,7 +190,7 @@ func MigratePatchPolicy(patchset []v1.PatchSet) []PatchSet { return newPatchSets } -func patchPatches(patches []v1.Patch) []Patch { +func migratePatches(patches []v1.Patch) []Patch { newPatches := []Patch{} for _, patch := range patches { @@ -196,7 +198,10 @@ func patchPatches(patches []v1.Patch) []Patch { newpatch.Patch = patch if patch.Policy != nil { - newpatch.Policy = patchPolicy(patch.Policy) + newpatch.Policy = migratePatchPolicy(patch.Policy) + // Conversion function above overrides the patch policy in the new type, + // so after conversion we set underlying policy to nil to make ensure + // there's no conflict in the serialized output. 
newpatch.Patch.Policy = nil } @@ -206,8 +211,8 @@ func patchPatches(patches []v1.Patch) []Patch { return newPatches } -func patchPolicy(policy *v1.PatchPolicy) *PatchPolicy { - to := getToFieldPathPolicy(policy.MergeOptions) +func migratePatchPolicy(policy *v1.PatchPolicy) *PatchPolicy { + to := migrateMergeOptions(policy.MergeOptions) if to == nil && policy.FromFieldPath == nil { // neither To nor From has been set, just return nil to use defaults for @@ -221,7 +226,7 @@ func patchPolicy(policy *v1.PatchPolicy) *PatchPolicy { } } -func getToFieldPathPolicy(mo *commonv1.MergeOptions) *ToFieldPathPolicy { +func migrateMergeOptions(mo *commonv1.MergeOptions) *ToFieldPathPolicy { if mo == nil { // No merge options at all, default to nil which will mean Replace return nil diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go index d16b6ac21..e7f068f7c 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go @@ -295,7 +295,7 @@ func TestConvertPnTToPipeline(t *testing.T) { "patchSets": []PatchSet{ { Name: "test-patchset", - Patch: []Patch{ + Patches: []Patch{ { Patch: v1.Patch{ Type: v1.PatchTypeFromCompositeFieldPath, @@ -567,7 +567,7 @@ func TestProcessFunctionInput(t *testing.T) { "patchSets": []PatchSet{ { Name: "test-patchset", - Patch: []Patch{ + Patches: []Patch{ { Patch: v1.Patch{ Type: v1.PatchTypeFromCompositeFieldPath, @@ -1148,7 +1148,7 @@ func TestPatchPolicy(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - got := patchPolicy(tc.args) + got := migratePatchPolicy(tc.args) if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("%s\npatchPolicy(...): -want i, +got i:\n%s", tc.reason, diff) } diff --git a/cmd/crank/beta/convert/pipelinecomposition/types.go b/cmd/crank/beta/convert/pipelinecomposition/types.go index 3a4a97708..2fd10602e 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/types.go +++ b/cmd/crank/beta/convert/pipelinecomposition/types.go @@ -53,7 +53,7 @@ type PatchSet struct { // Name of this PatchSet. Name string `json:"name"` - Patch []Patch `json:"patches"` + Patches []Patch `json:"patches"` } // ComposedTemplate wrapper around v1.ComposedTemplate with custom Patch. From 32bc63b03061556971053c56bbd67bc01599e55b Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Tue, 30 Apr 2024 16:25:09 +0100 Subject: [PATCH 156/370] docs(trace): explicitly mention -o wide additional info Signed-off-by: Philippe Scorsolini --- cmd/crank/beta/trace/trace.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/crank/beta/trace/trace.go b/cmd/crank/beta/trace/trace.go index 2f2a89a7e..30d7071a0 100644 --- a/cmd/crank/beta/trace/trace.go +++ b/cmd/crank/beta/trace/trace.go @@ -85,7 +85,8 @@ Examples: # Trace a MyKind resource (mykinds.example.org/v1alpha1) named 'my-res' in the namespace 'my-ns' crossplane beta trace mykind my-res -n my-ns - # Output wide format, showing full errors and condition messages + # Output wide format, showing full errors and condition messages, and other useful info + # depending on the target type, e.g. 
composed resources names for XR/XRCs or image used for XPKGs crossplane beta trace mykind my-res -n my-ns -o wide # Show connection secrets in the output From e3a74f1c0496f7c5e8fbe85ed6b2ec1dd5b58b43 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Tue, 30 Apr 2024 17:00:21 +0100 Subject: [PATCH 157/370] review: wording Co-authored-by: Jared Watts Signed-off-by: Philippe Scorsolini --- cmd/crank/beta/trace/trace.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crank/beta/trace/trace.go b/cmd/crank/beta/trace/trace.go index 30d7071a0..943f6d732 100644 --- a/cmd/crank/beta/trace/trace.go +++ b/cmd/crank/beta/trace/trace.go @@ -86,7 +86,7 @@ Examples: crossplane beta trace mykind my-res -n my-ns # Output wide format, showing full errors and condition messages, and other useful info - # depending on the target type, e.g. composed resources names for XR/XRCs or image used for XPKGs + # depending on the target type, e.g. composed resources names for composite resources or image used for packages crossplane beta trace mykind my-res -n my-ns -o wide # Show connection secrets in the output From 944b00e838be89d849aa4be698892a61e3570bca Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Wed, 1 May 2024 09:25:30 +0200 Subject: [PATCH 158/370] Convert mergeOptions for environment patches Signed-off-by: Jared Watts --- .../convert/pipelinecomposition/converter.go | 40 ++- .../pipelinecomposition/converter_test.go | 302 ++++++++++++++++-- .../beta/convert/pipelinecomposition/types.go | 20 +- 3 files changed, 332 insertions(+), 30 deletions(-) diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter.go b/cmd/crank/beta/convert/pipelinecomposition/converter.go index 450314ddf..174c1e5c5 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter.go @@ -148,7 +148,7 @@ func processFunctionInput(input *Input) *runtime.RawExtension { inputType := map[string]any{ "apiVersion": "pt.fn.crossplane.io/v1beta1", "kind": "Resources", - "environment": processedInput.Environment.DeepCopy(), + "environment": MigratePatchPolicyInEnvironment(processedInput.Environment.DeepCopy()), "patchSets": MigratePatchPolicyInPatchSets(processedInput.PatchSets), "resources": MigratePatchPolicyInResources(processedInput.Resources), } @@ -190,6 +190,18 @@ func MigratePatchPolicyInPatchSets(patchset []v1.PatchSet) []PatchSet { return newPatchSets } +// MigratePatchPolicyInEnvironment processes all the patches in the given +// environment configuration to migrate their patch policies. +func MigratePatchPolicyInEnvironment(ec *v1.EnvironmentConfiguration) *Environment { + if ec == nil || len(ec.Patches) == 0 { + return nil + } + + return &Environment{ + Patches: migrateEnvPatches(ec.Patches), + } +} + func migratePatches(patches []v1.Patch) []Patch { newPatches := []Patch{} @@ -200,7 +212,7 @@ func migratePatches(patches []v1.Patch) []Patch { if patch.Policy != nil { newpatch.Policy = migratePatchPolicy(patch.Policy) // Conversion function above overrides the patch policy in the new type, - // so after conversion we set underlying policy to nil to make ensure + // so after conversion we set underlying policy to nil to make sure // there's no conflict in the serialized output. 
newpatch.Patch.Policy = nil } @@ -211,6 +223,27 @@ func migratePatches(patches []v1.Patch) []Patch { return newPatches } +func migrateEnvPatches(envPatches []v1.EnvironmentPatch) []EnvironmentPatch { + newEnvPatches := []EnvironmentPatch{} + + for _, envPatch := range envPatches { + newEnvPatch := EnvironmentPatch{} + newEnvPatch.EnvironmentPatch = envPatch + + if envPatch.Policy != nil { + newEnvPatch.Policy = migratePatchPolicy(envPatch.Policy) + // Conversion function above overrides the patch policy in the new type, + // so after conversion we set underlying policy to nil to make sure + // there's no conflict in the serialized output. + newEnvPatch.EnvironmentPatch.Policy = nil + } + + newEnvPatches = append(newEnvPatches, newEnvPatch) + } + + return newEnvPatches +} + func migratePatchPolicy(policy *v1.PatchPolicy) *PatchPolicy { to := migrateMergeOptions(policy.MergeOptions) @@ -226,6 +259,9 @@ func migratePatchPolicy(policy *v1.PatchPolicy) *PatchPolicy { } } +// migrateMergeOptions implements the conversion of mergeOptions to the new +// toFieldPath policy. The conversion logic is described in +// https://github.com/crossplane-contrib/function-patch-and-transform/?tab=readme-ov-file#mergeoptions-replaced-by-tofieldpath. func migrateMergeOptions(mo *commonv1.MergeOptions) *ToFieldPathPolicy { if mo == nil { // No merge options at all, default to nil which will mean Replace diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go index e7f068f7c..f8406626a 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go @@ -181,6 +181,12 @@ func TestConvertPnTToPipeline(t *testing.T) { }, }, }, + Policy: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + }, + }, }, { Type: v1.PatchTypeCombineFromComposite, @@ -224,6 +230,11 @@ func TestConvertPnTToPipeline(t *testing.T) { }, }, }, + Policy: &v1.PatchPolicy{ + MergeOptions: &commonv1.MergeOptions{ + AppendSlice: ptr.To(true), + }, + }, }, }, Policy: &commonv1.Policy{ @@ -262,33 +273,40 @@ func TestConvertPnTToPipeline(t *testing.T) { Object: map[string]any{ "apiVersion": string("pt.fn.crossplane.io/v1beta1"), "kind": string("Resources"), - "environment": &v1.EnvironmentConfiguration{ - Patches: []v1.EnvironmentPatch{ + "environment": &Environment{ + Patches: []EnvironmentPatch{ { - Type: typeFromCompositeFieldPath, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, + EnvironmentPatch: v1.EnvironmentPatch{ + Type: typeFromCompositeFieldPath, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + }, }, { - Type: typeFromCompositeFieldPath, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, - Transforms: []v1.Transform{ - { - Type: v1.TransformTypeString, - String: &v1.StringTransform{ - Format: &stringFmt, - Type: v1.StringTransformTypeFormat, + EnvironmentPatch: v1.EnvironmentPatch{ + Type: typeFromCompositeFieldPath, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + Transforms: []v1.Transform{ + { + Type: v1.TransformTypeString, + String: &v1.StringTransform{ + Format: &stringFmt, + Type: v1.StringTransformTypeFormat, + }, }, - }, - { - Type: v1.TransformTypeMath, - Math: &v1.MathTransform{ - Multiply: &intp, - Type: v1.MathTransformTypeMultiply, + { + Type: v1.TransformTypeMath, + Math: &v1.MathTransform{ + Multiply: &intp, + Type: v1.MathTransformTypeMultiply, + }, 
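For readers tracking what this conversion means for an actual Composition, here is a minimal before/after illustration of a single patch, consistent with the mapping the tests in this patch assert (mergeOptions.keepMapValues maps to a toFieldPath policy of MergeObjects, appendSlice maps to ForceMergeObjectsAppendArrays, and a patch with no mergeOptions keeps the default Replace behaviour). The field paths are illustrative, not taken from this change.

```yaml
# Native patch-and-transform patch, as accepted by the converter (illustrative paths).
- type: FromCompositeFieldPath
  fromFieldPath: spec.parameters.tags
  toFieldPath: spec.forProvider.fields.tags
  policy:
    fromFieldPath: Optional
    mergeOptions:
      keepMapValues: true

# Equivalent patch written into the function-patch-and-transform input.
- type: FromCompositeFieldPath
  fromFieldPath: spec.parameters.tags
  toFieldPath: spec.forProvider.fields.tags
  policy:
    fromFieldPath: Optional
    toFieldPath: MergeObjects
```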
}, }, }, + Policy: &PatchPolicy{ + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays), + }, }, }, }, @@ -318,6 +336,10 @@ func TestConvertPnTToPipeline(t *testing.T) { }, }, }, + Policy: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), + }, }, { Patch: v1.Patch{ @@ -498,7 +520,7 @@ func TestProcessFunctionInput(t *testing.T) { Object: map[string]any{ "apiVersion": "pt.fn.crossplane.io/v1beta1", "kind": "Resources", - "environment": (*v1.EnvironmentConfiguration)(nil), + "environment": (*Environment)(nil), "patchSets": []PatchSet{}, "resources": []ComposedTemplate{}, }, @@ -555,12 +577,14 @@ func TestProcessFunctionInput(t *testing.T) { Object: map[string]any{ "apiVersion": "pt.fn.crossplane.io/v1beta1", "kind": "Resources", - "environment": &v1.EnvironmentConfiguration{ - Patches: []v1.EnvironmentPatch{ + "environment": &Environment{ + Patches: []EnvironmentPatch{ { - Type: typeFromCompositeFieldPath, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, + EnvironmentPatch: v1.EnvironmentPatch{ + Type: typeFromCompositeFieldPath, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + }, }, }, }, @@ -991,6 +1015,232 @@ func TestSetMissingResourceFields(t *testing.T) { } } +func TestMigratePatchPolicyInResources(t *testing.T) { + cases := map[string]struct { + reason string + args []v1.ComposedTemplate + want []ComposedTemplate + }{ + "ResourcesHasSimplePatches": { + reason: "Composed Resources has simple patches", + args: []v1.ComposedTemplate{ + { + Name: ptr.To("resource-0"), + Patches: []v1.Patch{ + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + }, + }, + }, + }, + }, + }, + want: []ComposedTemplate{ + { + ComposedTemplate: v1.ComposedTemplate{ + Name: ptr.To("resource-0"), + Patches: nil, + }, + Patches: []Patch{ + { + Patch: v1.Patch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: nil, + }, + { + Patch: v1.Patch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := MigratePatchPolicyInResources(tc.args) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("MigratePatchPolicyInResources() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestMigratePatchPolicyInPatchSets(t *testing.T) { + cases := map[string]struct { + reason string + args []v1.PatchSet + want []PatchSet + }{ + "PatchSetHasSimplePatches": { + reason: "PatchSet has simple patches", + args: []v1.PatchSet{ + { + Name: "patchset-0", + Patches: []v1.Patch{ + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: &v1.PatchPolicy{ + FromFieldPath: 
ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + }, + }, + }, + }, + }, + }, + want: []PatchSet{ + { + Name: "patchset-0", + Patches: []Patch{ + { + Patch: v1.Patch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: nil, + }, + { + Patch: v1.Patch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := MigratePatchPolicyInPatchSets(tc.args) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("MigratePatchPolicyInPatchSets() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestMigratePatchPolicyInEnvironment(t *testing.T) { + cases := map[string]struct { + reason string + args *v1.EnvironmentConfiguration + want *Environment + }{ + "EnvironmentNil": { + reason: "Environment is nil", + args: nil, + want: nil, + }, + "EnvironmentHasNoPatches": { + reason: "Environment has no patches", + args: &v1.EnvironmentConfiguration{Patches: []v1.EnvironmentPatch{}}, + want: nil, + }, + "EnvironmentHasSimplePatches": { + reason: "Environment has simple patches", + args: &v1.EnvironmentConfiguration{ + Patches: []v1.EnvironmentPatch{ + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + }, + }, + }, + }, + }, + want: &Environment{ + Patches: []EnvironmentPatch{ + { + EnvironmentPatch: v1.EnvironmentPatch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: nil, + }, + { + EnvironmentPatch: v1.EnvironmentPatch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := MigratePatchPolicyInEnvironment(tc.args) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("MigratePatchPolicyInEnvironment() mismatch (-want +got):\n%s", diff) + } + }) + } +} + /* # MergeOptions appendSlice keepMapValues policy.toFieldPath 1 nil N/A n/A nil (defaults to Replace) diff --git a/cmd/crank/beta/convert/pipelinecomposition/types.go b/cmd/crank/beta/convert/pipelinecomposition/types.go index 2fd10602e..1ae9bb2ae 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/types.go +++ b/cmd/crank/beta/convert/pipelinecomposition/types.go @@ -18,9 +18,13 @@ package pipelinecomposition import v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" -// Input represents the input to the patch-and-transform function -// This struct is copied from function patch and transform, as we can't import it directly +// Input represents the input to the patch-and-transform function. 
This struct +// originates from function patch and transform, as we can't import it directly // https://github.com/crossplane-contrib/function-patch-and-transform/blob/main/input/v1beta1/resources.go +// Note that it does not exactly match the target type with full fidelity. +// This type is used during the processing and conversion of the given input, +// but the final converted output is written in an unstructured manner without a +// static type definition for more flexibility. type Input struct { // PatchSets define a named set of patches that may be included by any // resource in this Composition. PatchSets cannot themselves refer to other @@ -70,6 +74,18 @@ type Patch struct { Policy *PatchPolicy `json:"policy,omitempty"` } +// Environment represents the Composition environment. +type Environment struct { + Patches []EnvironmentPatch `json:"patches,omitempty"` +} + +// EnvironmentPatch wrapper around v1.EnvironmentPatch with custom PatchPolicy. +type EnvironmentPatch struct { + v1.EnvironmentPatch + + Policy *PatchPolicy `json:"policy,omitempty"` +} + // A ToFieldPathPolicy determines how to patch to a field path. type ToFieldPathPolicy string From f66104f171b493aad583154517489e122cb483b5 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Wed, 31 Jan 2024 14:06:51 +0100 Subject: [PATCH 159/370] controller/pkg: add version and constraint context to invalid dependencies Signed-off-by: Dr. Stefan Schimanski --- internal/controller/pkg/revision/dependency.go | 12 +++++++++--- internal/controller/pkg/revision/dependency_test.go | 2 +- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/internal/controller/pkg/revision/dependency.go b/internal/controller/pkg/revision/dependency.go index c70772a16..86558aeca 100644 --- a/internal/controller/pkg/revision/dependency.go +++ b/internal/controller/pkg/revision/dependency.go @@ -18,6 +18,8 @@ package revision import ( "context" + "fmt" + "strings" "github.com/Masterminds/semver" "github.com/google/go-containerregistry/pkg/name" @@ -41,7 +43,7 @@ const ( errNotMeta = "meta type is not a valid package" errGetOrCreateLock = "cannot get or create lock" errInitDAG = "cannot initialize dependency graph from the packages in the lock" - errFmtIncompatibleDependency = "incompatible dependencies: %+v" + errFmtIncompatibleDependency = "incompatible dependencies: %s" errFmtMissingDependencies = "missing dependencies: %+v" errDependencyNotInGraph = "dependency is not present in graph" errDependencyNotLockPackage = "dependency in graph is not a lock package" @@ -207,12 +209,16 @@ func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Obje return found, installed, invalid, err } if !c.Check(v) { - invalidDeps = append(invalidDeps, lp.Identifier()) + s := fmt.Sprintf("%s@%s", lp.Identifier(), lp.Version) + if dep.Constraints != "" { + s += " with " + strings.TrimSpace(dep.Constraints) + } + invalidDeps = append(invalidDeps, s) } } invalid = len(invalidDeps) if invalid > 0 { - return found, installed, invalid, errors.Errorf(errFmtIncompatibleDependency, invalidDeps) + return found, installed, invalid, errors.Errorf(errFmtIncompatibleDependency, strings.Join(invalidDeps, "; ")) } return found, installed, invalid, nil } diff --git a/internal/controller/pkg/revision/dependency_test.go b/internal/controller/pkg/revision/dependency_test.go index d0dc45daf..82bab83ee 100644 --- a/internal/controller/pkg/revision/dependency_test.go +++ b/internal/controller/pkg/revision/dependency_test.go @@ -433,7 +433,7 @@ func TestResolve(t 
*testing.T) { total: 3, installed: 3, invalid: 2, - err: errors.Errorf(errFmtIncompatibleDependency, []string{"not-here-1", "not-here-2"}), + err: errors.Errorf(errFmtIncompatibleDependency, "not-here-1@v0.0.1 with >=v0.1.0; not-here-2@v0.0.1 with >=v0.1.0"), }, }, "SuccessfulSelfExistValidDependencies": { From b6f942d0c15a4167ac7e98ae12d3f0ca74d342b6 Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Thu, 2 May 2024 14:52:28 +0200 Subject: [PATCH 160/370] controller/pkg: clarify which packages and constraints are incompatible for an invalid dependency Signed-off-by: Jared Watts --- internal/controller/pkg/revision/dependency.go | 4 ++-- internal/controller/pkg/revision/dependency_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/controller/pkg/revision/dependency.go b/internal/controller/pkg/revision/dependency.go index 86558aeca..9da209088 100644 --- a/internal/controller/pkg/revision/dependency.go +++ b/internal/controller/pkg/revision/dependency.go @@ -209,9 +209,9 @@ func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Obje return found, installed, invalid, err } if !c.Check(v) { - s := fmt.Sprintf("%s@%s", lp.Identifier(), lp.Version) + s := fmt.Sprintf("existing package %s@%s", lp.Identifier(), lp.Version) if dep.Constraints != "" { - s += " with " + strings.TrimSpace(dep.Constraints) + s = fmt.Sprintf("%s is incompatible with constraint %s", s, strings.TrimSpace(dep.Constraints)) } invalidDeps = append(invalidDeps, s) } diff --git a/internal/controller/pkg/revision/dependency_test.go b/internal/controller/pkg/revision/dependency_test.go index 82bab83ee..9b3dd0ee2 100644 --- a/internal/controller/pkg/revision/dependency_test.go +++ b/internal/controller/pkg/revision/dependency_test.go @@ -433,7 +433,7 @@ func TestResolve(t *testing.T) { total: 3, installed: 3, invalid: 2, - err: errors.Errorf(errFmtIncompatibleDependency, "not-here-1@v0.0.1 with >=v0.1.0; not-here-2@v0.0.1 with >=v0.1.0"), + err: errors.Errorf(errFmtIncompatibleDependency, "existing package not-here-1@v0.0.1 is incompatible with constraint >=v0.1.0; existing package not-here-2@v0.0.1 is incompatible with constraint >=v0.1.0"), }, }, "SuccessfulSelfExistValidDependencies": { From f3622a75798da8b1a89386cb59a98a24517e3305 Mon Sep 17 00:00:00 2001 From: ravilr Date: Sat, 4 May 2024 16:58:41 -0700 Subject: [PATCH 161/370] Clear SSA field managers from composed resources when migrating to composition functions Signed-off-by: ravilr --- .../composite/composition_functions.go | 115 ++++++++++++++++++ test/e2e/apiextensions_test.go | 50 ++++++++ .../composition-xfn.yaml | 39 ++++++ .../setup/composition.yaml | 5 + .../setup/functions.yaml | 6 + 5 files changed, 215 insertions(+) create mode 100644 test/e2e/manifests/apiextensions/composition/propagate-field-removals/composition-xfn.yaml create mode 100644 test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/functions.yaml diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index 08875fee3..a126dac18 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -21,6 +21,7 @@ import ( "fmt" "reflect" "sort" + "strings" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/structpb" @@ -122,6 +123,7 @@ type xr struct { ComposedResourceObserver 
ComposedResourceGarbageCollector ExtraResourcesFetcher + ManagedFieldsUpgrader } // A FunctionRunner runs a single Composition Function. @@ -180,6 +182,14 @@ func (fn ComposedResourceGarbageCollectorFn) GarbageCollectComposedResources(ctx return fn(ctx, owner, observed, desired) } +// A ManagedFieldsUpgrader upgrades an objects managed fields from client-side +// apply to server-side apply. This is necessary when an object was previously +// managed using client-side apply, but should now be managed using server-side +// apply. See https://github.com/kubernetes/kubernetes/issues/99003 for details. +type ManagedFieldsUpgrader interface { + Upgrade(ctx context.Context, obj client.Object) error +} + // A FunctionComposerOption is used to configure a FunctionComposer. type FunctionComposerOption func(*FunctionComposer) @@ -215,6 +225,15 @@ func WithComposedResourceGarbageCollector(d ComposedResourceGarbageCollector) Fu } } +// WithManagedFieldsUpgrader configures how the FunctionComposer should upgrade +// composed resources managed fields from client-side apply to +// server-side apply. +func WithManagedFieldsUpgrader(u ManagedFieldsUpgrader) FunctionComposerOption { + return func(p *FunctionComposer) { + p.composite.ManagedFieldsUpgrader = u + } +} + // NewFunctionComposer returns a new Composer that supports composing resources using // both Patch and Transform (P&T) logic and a pipeline of Composition Functions. func NewFunctionComposer(kube client.Client, r FunctionRunner, o ...FunctionComposerOption) *FunctionComposer { @@ -232,6 +251,7 @@ func NewFunctionComposer(kube client.Client, r FunctionRunner, o ...FunctionComp ComposedResourceObserver: NewExistingComposedResourceObserver(kube, f), ComposedResourceGarbageCollector: NewDeletingComposedResourceGarbageCollector(kube), NameGenerator: names.NewNameGenerator(kube), + ManagedFieldsUpgrader: NewPatchingManagedFieldsUpgrader(kube), }, pipeline: r, @@ -476,6 +496,19 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur return CompositionResult{}, errors.Wrap(err, errApplyXRRefs) } + // TODO: Remove this call to Upgrade once no supported version of + // Crossplane have native P&T available. We only need to upgrade field managers if the + // native PTComposer might have applied the composed resources before, using the + // default client-side apply field manager "crossplane", + // but now migrated to use Composition functions, which uses server-side apply instead. + // Without this managedFields upgrade, the composed resources ends up having shared ownership + // of fields and field removals won't sync properly. + for _, cd := range observed { + if err := c.composite.ManagedFieldsUpgrader.Upgrade(ctx, cd.Resource); err != nil { + return CompositionResult{}, errors.Wrap(err, "cannot upgrade composed resource's managed fields from client-side to server-side apply") + } + } + // Produce our array of resources to return to the Reconciler. The // Reconciler uses this array to determine whether the XR is ready. resources := make([]ComposedResource, 0, len(desired)) @@ -825,3 +858,85 @@ func UpdateResourceRefs(xr resource.ComposedResourcesReferencer, desired Compose xr.SetResourceReferences(refs) } + +// A PatchingManagedFieldsUpgrader uses a JSON patch to upgrade an object's +// managed fields from client-side to server-side apply. The upgrade is a no-op +// if the object does not need upgrading. 
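The field-manager mechanics this upgrade deals with can be hard to picture from the code alone. Below is a hypothetical, abridged view of a composed resource's metadata.managedFields across the steps described in the Upgrade method that follows; the manager names other than "crossplane" and "before-first-apply" are illustrative, only the "apiextensions.crossplane.io/composed/" prefix comes from this change.

```yaml
# Before the upgrade: fields were set with client-side apply by the native composer.
managedFields:
- manager: crossplane
  operation: Update

# After the first Upgrade call clears the managers and the XR controller
# server-side applies its desired state.
managedFields:
- manager: apiextensions.crossplane.io/composed/example-composed   # illustrative suffix
  operation: Apply
- manager: before-first-apply
  operation: Update

# After the second Upgrade call removes the before-first-apply entry.
managedFields:
- manager: apiextensions.crossplane.io/composed/example-composed   # illustrative suffix
  operation: Apply
```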
+type PatchingManagedFieldsUpgrader struct { + client client.Writer +} + +// NewPatchingManagedFieldsUpgrader returns a ManagedFieldsUpgrader that uses a +// JSON patch to upgrade and object's managed fields from client-side to +// server-side apply. +func NewPatchingManagedFieldsUpgrader(w client.Writer) *PatchingManagedFieldsUpgrader { + return &PatchingManagedFieldsUpgrader{client: w} +} + +// Upgrade the supplied composed object's field managers from client-side to server-side +// apply. +// +// This is a multi-step process. +// +// Step 1: All fields are owned by manager 'crossplane' operation 'Update'. This +// represents all fields set by the XR controller up to this point. +// +// Step 2: Upgrade is called for the first time. We clear all field managers. +// +// Step 3: The XR controller server-side applies its fully specified intent +// as field manager with prefix 'apiextensions.crossplane.io/composed/'. This becomes the +// manager of all the fields that are part of the XR controller's fully +// specified intent. All existing fields the XR controller didn't specify +// become owned by a special manager - 'before-first-apply', operation 'Update'. +// +// Step 4: Upgrade is called for the second time. It deletes the +// 'before-first-apply' field manager entry. Only the XR composed field manager +// remains. +func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client.Object) error { + // The composed resource doesn't exist, nothing to upgrade. + if !meta.WasCreated(obj) { + return nil + } + + foundSSA := false + foundBFA := false + idxBFA := -1 + + for i, e := range obj.GetManagedFields() { + if strings.HasPrefix(e.Manager, FieldOwnerComposedPrefix) { + foundSSA = true + } + if e.Manager == "before-first-apply" { + foundBFA = true + idxBFA = i + } + } + + switch { + // If our SSA field manager exists and the before-first-apply field manager + // doesn't, we've already done the upgrade. Don't do it again. + case foundSSA && !foundBFA: + return nil + + // We found our SSA field manager but also before-first-apply. It should now + // be safe to delete before-first-apply. + case foundSSA && foundBFA: + p := []byte(fmt.Sprintf(`[ + {"op": "remove", "path": "/metadata/managedFields/%d"}, + {"op": "replace", "path": "/metadata/resourceVersion", "value": "%s"} + ]`, idxBFA, obj.GetResourceVersion())) + return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), "cannot remove before-first-apply from field managers") + + // We didn't find our SSA field manager. This means we haven't started the + // upgrade. The first thing we want to do is clear all managed fields. + // After we do this we'll let our SSA field manager apply the fields it + // cares about. The result will be that our SSA field manager shares + // ownership with a new manager named 'before-first-apply'. 
+ default: + p := []byte(fmt.Sprintf(`[ + {"op": "replace", "path": "/metadata/managedFields", "value": [{}]}, + {"op": "replace", "path": "/metadata/resourceVersion", "value": "%s"} + ]`, obj.GetResourceVersion())) + return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), "cannot clear field managers") + } +} diff --git a/test/e2e/apiextensions_test.go b/test/e2e/apiextensions_test.go index 7906d8704..743e681b6 100644 --- a/test/e2e/apiextensions_test.go +++ b/test/e2e/apiextensions_test.go @@ -21,6 +21,7 @@ import ( "time" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/e2e-framework/pkg/features" "sigs.k8s.io/e2e-framework/third_party/helm" @@ -377,6 +378,55 @@ func TestPropagateFieldsRemovalToXRAfterUpgrade(t *testing.T) { ) } +// TestPropagateFieldsRemovalToComposed tests Crossplane's end-to-end SSA syncing +// functionality of clear propagation of fields from claim->XR->MR, when existing +// composition and resources are migrated from native P-and-T to functions pipeline mode. +func TestPropagateFieldsRemovalToComposed(t *testing.T) { + manifests := "test/e2e/manifests/apiextensions/composition/propagate-field-removals" + environment.Test(t, + features.New(t.Name()). + WithLabel(LabelArea, LabelAreaAPIExtensions). + WithLabel(LabelSize, LabelSizeSmall). + WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). + WithLabel(config.LabelTestSuite, SuiteSSAClaims). + WithSetup("EnableSSAClaims", funcs.AllOf( + funcs.AsFeaturesFunc(environment.HelmUpgradeCrossplaneToSuite(SuiteSSAClaims)), + funcs.ReadyToTestWithin(1*time.Minute, namespace), + )). + WithSetup("PrerequisitesAreCreated", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "setup/*.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "setup/*.yaml"), + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "setup/definition.yaml", apiextensionsv1.WatchingComposite()), + )). + Assess("CreateClaim", funcs.AllOf( + funcs.ApplyClaim(FieldManager, manifests, "claim.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "claim.yaml"), + funcs.ResourcesHaveConditionWithin(5*time.Minute, manifests, "claim.yaml", xpv1.Available()), + )). + Assess("ConvertToPipelineCompositionUpgrade", funcs.ApplyResources(FieldManager, manifests, "composition-xfn.yaml")). + Assess("UpdateClaim", funcs.ApplyClaim(FieldManager, manifests, "claim-update.yaml")). + Assess("FieldsRemovalPropagatedToMR", funcs.AllOf( + // field removals and updates are propagated claim -> XR -> MR, after converting composition from native to pipeline mode + funcs.ComposedResourcesHaveFieldValueWithin(1*time.Minute, manifests, "claim.yaml", + "spec.forProvider.fields.tags[newtag]", funcs.NotFound, + funcs.FilterByGK(schema.GroupKind{Group: "nop.crossplane.io", Kind: "NopResource"})), + funcs.ComposedResourcesHaveFieldValueWithin(1*time.Minute, manifests, "claim.yaml", + "spec.forProvider.fields.tags[tag]", "v1", + funcs.FilterByGK(schema.GroupKind{Group: "nop.crossplane.io", Kind: "NopResource"})), + )). + WithTeardown("DeleteClaim", funcs.AllOf( + funcs.DeleteResources(manifests, "claim.yaml"), + funcs.ResourcesDeletedWithin(2*time.Minute, manifests, "claim.yaml"), + )). + WithTeardown("DeletePrerequisites", funcs.ResourcesDeletedAfterListedAreGone(3*time.Minute, manifests, "setup/*.yaml", nopList)). 
+ WithTeardown("DisableSSAClaims", funcs.AllOf( + funcs.AsFeaturesFunc(environment.HelmUpgradeCrossplaneToBase()), // Disable our feature flag. + funcs.ReadyToTestWithin(1*time.Minute, namespace), + )). + Feature(), + ) +} + func TestCompositionSelection(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/composition-selection" environment.Test(t, diff --git a/test/e2e/manifests/apiextensions/composition/propagate-field-removals/composition-xfn.yaml b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/composition-xfn.yaml new file mode 100644 index 000000000..70646fe58 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/composition-xfn.yaml @@ -0,0 +1,39 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: xnopresources.nop.example.org +spec: + compositeTypeRef: + apiVersion: nop.example.org/v1alpha1 + kind: XNopResource + mode: Pipeline + pipeline: + - functionRef: + name: function-patch-and-transform + input: + apiVersion: pt.fn.crossplane.io/v1beta1 + kind: Resources + resources: + - base: + apiVersion: nop.crossplane.io/v1alpha1 + kind: NopResource + spec: + forProvider: + conditionAfter: + - conditionStatus: "True" + conditionType: Ready + time: 0s + fields: + tags: {} + name: nop-resource-1 + patches: + - fromFieldPath: spec.coolField + toFieldPath: metadata.annotations["cf"] + type: FromCompositeFieldPath + - fromFieldPath: metadata.annotations["cf"] + toFieldPath: status.coolerField + type: ToCompositeFieldPath + - fromFieldPath: spec.parameters.tags + toFieldPath: spec.forProvider.fields.tags + type: FromCompositeFieldPath + step: patch-and-transform \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/composition.yaml b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/composition.yaml index e8d925799..8c407842a 100644 --- a/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/composition.yaml +++ b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/composition.yaml @@ -17,6 +17,8 @@ spec: - conditionType: Ready conditionStatus: "True" time: 0s + fields: + tags: {} patches: - type: FromCompositeFieldPath fromFieldPath: spec.coolField @@ -24,3 +26,6 @@ spec: - type: ToCompositeFieldPath fromFieldPath: metadata.annotations["cf"] toFieldPath: status.coolerField + - fromFieldPath: spec.parameters.tags + toFieldPath: spec.forProvider.fields.tags + type: FromCompositeFieldPath \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/functions.yaml b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/functions.yaml new file mode 100644 index 000000000..1c5f98aa7 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/functions.yaml @@ -0,0 +1,6 @@ +apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-patch-and-transform +spec: + package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.5.0 \ No newline at end of file From 1acad720bb7a1e80ca49c452dfce2f0ab830c1ee Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Tue, 7 May 2024 10:11:36 +0200 Subject: [PATCH 162/370] Bump crossplane-runtime to v1.17.0-rc.0 Signed-off-by: Jared Watts --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 034404ed6..11bd26148 
100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 github.com/Masterminds/semver v1.5.0 github.com/alecthomas/kong v0.8.1 - github.com/crossplane/crossplane-runtime v1.16.0-rc.1.0.20240226223305-2c81cc6326e5 + github.com/crossplane/crossplane-runtime v1.17.0-rc.0 github.com/docker/docker v25.0.5+incompatible github.com/docker/go-connections v0.5.0 github.com/emicklei/dot v1.6.1 diff --git a/go.sum b/go.sum index 492095aaa..2fba40415 100644 --- a/go.sum +++ b/go.sum @@ -125,8 +125,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crossplane/crossplane-runtime v1.16.0-rc.1.0.20240226223305-2c81cc6326e5 h1:Jiqj9j43gUX/goitNa86/ociah8G74C3pIGwIPSZsks= -github.com/crossplane/crossplane-runtime v1.16.0-rc.1.0.20240226223305-2c81cc6326e5/go.mod h1:rG/KJwyA4iGMCubZ1EXs39Ow7XvOcWEfb1u3jkNekfw= +github.com/crossplane/crossplane-runtime v1.17.0-rc.0 h1:v+JZ+94bQhunadP3wM64Mw6OnpPTwmiZRrShZEUQoMI= +github.com/crossplane/crossplane-runtime v1.17.0-rc.0/go.mod h1:Pz2tdGVMF6KDGzHZOkvKro0nKc8EzK0sb/nSA7pH4Dc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= From a30eeb244ce6ef7bf68b66e412537efa5514b58b Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Tue, 7 May 2024 10:35:51 +0200 Subject: [PATCH 163/370] chore: add observedGeneration field to core CRDs This new field is a result of the latest bump of crossplane-runtime and all changes in this commit were generated by `make generate`. Signed-off-by: Jared Watts --- ...plane.io_compositeresourcedefinitions.yaml | 7 +++++ ...ns.crossplane.io_compositionrevisions.yaml | 26 ++++++++++++++----- ...extensions.crossplane.io_compositions.yaml | 6 ++--- .../apiextensions.crossplane.io_usages.yaml | 7 +++++ ....crossplane.io_configurationrevisions.yaml | 7 +++++ .../pkg.crossplane.io_configurations.yaml | 7 +++++ .../pkg.crossplane.io_functionrevisions.yaml | 7 +++++ cluster/crds/pkg.crossplane.io_functions.yaml | 7 +++++ .../pkg.crossplane.io_providerrevisions.yaml | 7 +++++ cluster/crds/pkg.crossplane.io_providers.yaml | 7 +++++ 10 files changed, 79 insertions(+), 9 deletions(-) diff --git a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml index 8bb056fd0..99c507489 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml @@ -483,6 +483,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index e8a17232d..3c4b7db94 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -287,7 +287,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options on - a field path + a field path. properties: appendSlice: description: Specifies that already existing elements @@ -701,7 +701,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. properties: appendSlice: description: Specifies that already existing elements @@ -1239,7 +1239,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. properties: appendSlice: description: Specifies that already existing elements @@ -1614,6 +1614,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. @@ -1913,7 +1920,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options on - a field path + a field path. properties: appendSlice: description: Specifies that already existing elements @@ -2327,7 +2334,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. properties: appendSlice: description: Specifies that already existing elements @@ -2865,7 +2872,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. properties: appendSlice: description: Specifies that already existing elements @@ -3240,6 +3247,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index e8a037e9d..61e6d4c66 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -282,7 +282,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options on - a field path + a field path. 
properties: appendSlice: description: Specifies that already existing elements @@ -696,7 +696,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. properties: appendSlice: description: Specifies that already existing elements @@ -1237,7 +1237,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. properties: appendSlice: description: Specifies that already existing elements diff --git a/cluster/crds/apiextensions.crossplane.io_usages.yaml b/cluster/crds/apiextensions.crossplane.io_usages.yaml index b4a4cc072..cc8f7e557 100644 --- a/cluster/crds/apiextensions.crossplane.io_usages.yaml +++ b/cluster/crds/apiextensions.crossplane.io_usages.yaml @@ -176,6 +176,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. diff --git a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml index ed34a2b54..7fb52c93e 100644 --- a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml @@ -159,6 +159,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. diff --git a/cluster/crds/pkg.crossplane.io_configurations.yaml b/cluster/crds/pkg.crossplane.io_configurations.yaml index 6bc35a71e..42022bdba 100644 --- a/cluster/crds/pkg.crossplane.io_configurations.yaml +++ b/cluster/crds/pkg.crossplane.io_configurations.yaml @@ -151,6 +151,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. 
diff --git a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml index 4695ed512..7fb151816 100644 --- a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -202,6 +202,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. diff --git a/cluster/crds/pkg.crossplane.io_functions.yaml b/cluster/crds/pkg.crossplane.io_functions.yaml index cc895736c..7e5342cc6 100644 --- a/cluster/crds/pkg.crossplane.io_functions.yaml +++ b/cluster/crds/pkg.crossplane.io_functions.yaml @@ -181,6 +181,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. diff --git a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml index 92ff14575..961bfc454 100644 --- a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml @@ -202,6 +202,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. diff --git a/cluster/crds/pkg.crossplane.io_providers.yaml b/cluster/crds/pkg.crossplane.io_providers.yaml index d18030821..717433f4f 100644 --- a/cluster/crds/pkg.crossplane.io_providers.yaml +++ b/cluster/crds/pkg.crossplane.io_providers.yaml @@ -183,6 +183,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. 
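Taken together, these generated schema changes mean every condition reported by Crossplane's core types can now record the generation it was computed against. As a rough illustration (values invented), a resource whose spec is at generation 12 could report a condition such as:

```yaml
status:
  conditions:
  - type: Ready
    status: "True"
    reason: Available
    lastTransitionTime: "2024-05-07T10:35:51Z"
    observedGeneration: 12
```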
From 2bd5a908523880dbbf5cfdde4c5b6df4347e4de8 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 7 May 2024 23:42:59 -0700 Subject: [PATCH 164/370] Remove the RBAC namespace controller I'm confident no-one uses this controller. It's been disabled by default since Crossplane v1.13, and since v1.15 anyone who tries to enable it would get an error linking to the deprecation tracking issue. No-one has raised any concerns about removing it. Signed-off-by: Nic Cope --- cmd/crossplane/rbac/rbac.go | 24 -- .../controller/rbac/controller/options.go | 4 - .../controller/rbac/namespace/fuzz_test.go | 39 --- .../controller/rbac/namespace/reconciler.go | 256 --------------- .../rbac/namespace/reconciler_test.go | 296 ----------------- internal/controller/rbac/namespace/roles.go | 136 -------- .../controller/rbac/namespace/roles_test.go | 308 ------------------ internal/controller/rbac/namespace/watch.go | 100 ------ .../controller/rbac/namespace/watch_test.go | 90 ----- internal/controller/rbac/rbac.go | 8 +- 10 files changed, 1 insertion(+), 1260 deletions(-) delete mode 100644 internal/controller/rbac/namespace/fuzz_test.go delete mode 100644 internal/controller/rbac/namespace/reconciler.go delete mode 100644 internal/controller/rbac/namespace/reconciler_test.go delete mode 100644 internal/controller/rbac/namespace/roles.go delete mode 100644 internal/controller/rbac/namespace/roles_test.go delete mode 100644 internal/controller/rbac/namespace/watch.go delete mode 100644 internal/controller/rbac/namespace/watch_test.go diff --git a/cmd/crossplane/rbac/rbac.go b/cmd/crossplane/rbac/rbac.go index db170b100..ca8dcdb70 100644 --- a/cmd/crossplane/rbac/rbac.go +++ b/cmd/crossplane/rbac/rbac.go @@ -18,7 +18,6 @@ limitations under the License. package rbac import ( - "strings" "time" "github.com/alecthomas/kong" @@ -37,22 +36,9 @@ import ( "github.com/crossplane/crossplane/internal/xpkg" ) -// Available RBAC management policies. -const ( - ManagementPolicyAll = string(rbaccontroller.ManagementPolicyAll) - ManagementPolicyBasic = string(rbaccontroller.ManagementPolicyBasic) -) - // KongVars represent the kong variables associated with the CLI parser // required for the RBAC enum interpolation. var KongVars = kong.Vars{ //nolint:gochecknoglobals // We treat these as constants. - "rbac_manage_default_var": ManagementPolicyBasic, - "rbac_manage_enum_var": strings.Join( - []string{ - ManagementPolicyAll, - ManagementPolicyBasic, - }, - ", "), "rbac_default_registry": xpkg.DefaultRegistry, } @@ -76,9 +62,6 @@ type startCommand struct { LeaderElection bool `env:"LEADER_ELECTION" help:"Use leader election for the controller manager." name:"leader-election" short:"l"` Registry string `default:"${rbac_default_registry}" env:"REGISTRY" help:"Default registry used to fetch packages when not specified in tag." short:"r"` - ManagementPolicy string `hidden:"" name:"manage" short:"m"` - DeprecatedManagementPolicy string `default:"${rbac_manage_default_var}" enum:"${rbac_manage_enum_var}" hidden:"" name:"deprecated-manage"` - SyncInterval time.Duration `default:"1h" help:"How often all resources will be double-checked for drift from the desired state." short:"s"` PollInterval time.Duration `default:"1m" help:"How often individual resources will be checked for drift from the desired state."` MaxReconcileRate int `default:"10" help:"The global maximum rate per second at which resources may checked for drift from the desired state."` @@ -86,12 +69,6 @@ type startCommand struct { // Run the RBAC manager. 
func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { - if c.ManagementPolicy != "" { - return errors.New("--manage is deprecated, you can use --deprecated-manage until it is removed: see https://github.com/crossplane/crossplane/issues/5227") - } - - log.Debug("Starting", "policy", c.DeprecatedManagementPolicy) - cfg, err := ctrl.GetConfig() if err != nil { return errors.Wrap(err, "cannot get config") @@ -119,7 +96,6 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { GlobalRateLimiter: ratelimiter.NewGlobal(c.MaxReconcileRate), }, AllowClusterRole: c.ProviderClusterRole, - ManagementPolicy: rbaccontroller.ManagementPolicy(c.DeprecatedManagementPolicy), DefaultRegistry: c.Registry, } diff --git a/internal/controller/rbac/controller/options.go b/internal/controller/rbac/controller/options.go index 549d27378..a9f821e26 100644 --- a/internal/controller/rbac/controller/options.go +++ b/internal/controller/rbac/controller/options.go @@ -40,10 +40,6 @@ const ( type Options struct { controller.Options - // ManagementPolicy specifies which roles the RBAC manager should - // manage. - ManagementPolicy ManagementPolicy - // AllowClusterRole is used to determine what additional RBAC // permissions may be granted to Providers that request them. The // provider may request any permission that appears in the named role. diff --git a/internal/controller/rbac/namespace/fuzz_test.go b/internal/controller/rbac/namespace/fuzz_test.go deleted file mode 100644 index bc1dbd1d7..000000000 --- a/internal/controller/rbac/namespace/fuzz_test.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICEE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIO OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "testing" - - fuzz "github.com/AdaLogics/go-fuzz-headers" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" -) - -func FuzzRenderRoles(f *testing.F) { - f.Fuzz(func(_ *testing.T, data []byte) { - ff := fuzz.NewConsumer(data) - ns := &corev1.Namespace{} - ff.GenerateStruct(ns) - crs := make([]rbacv1.ClusterRole, 0) - ff.CreateSlice(&crs) - if len(crs) == 0 { - return - } - _ = RenderRoles(ns, crs) - }) -} diff --git a/internal/controller/rbac/namespace/reconciler.go b/internal/controller/rbac/namespace/reconciler.go deleted file mode 100644 index 2b30bb389..000000000 --- a/internal/controller/rbac/namespace/reconciler.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package namespace implements the RBAC manager's support for namespaces. -package namespace - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/event" - "github.com/crossplane/crossplane-runtime/pkg/logging" - "github.com/crossplane/crossplane-runtime/pkg/meta" - "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" - "github.com/crossplane/crossplane-runtime/pkg/resource" - - "github.com/crossplane/crossplane/internal/controller/rbac/controller" -) - -const ( - timeout = 2 * time.Minute - - errGetNamespace = "cannot get Namespace" - errApplyRole = "cannot apply Roles" - errListRoles = "cannot list ClusterRoles" -) - -// Event reasons. -const ( - reasonApplyRoles event.Reason = "ApplyRoles" -) - -// A RoleRenderer renders Roles for a given Namespace. -type RoleRenderer interface { - // RenderRoles for the supplied Namespace. - RenderRoles(d *corev1.Namespace, crs []rbacv1.ClusterRole) []rbacv1.Role -} - -// A RoleRenderFn renders Roles for the supplied Namespace. -type RoleRenderFn func(d *corev1.Namespace, crs []rbacv1.ClusterRole) []rbacv1.Role - -// RenderRoles renders Roles for the supplied Namespace. -func (fn RoleRenderFn) RenderRoles(d *corev1.Namespace, crs []rbacv1.ClusterRole) []rbacv1.Role { - return fn(d, crs) -} - -// Setup adds a controller that reconciles a Namespace by creating a series of -// opinionated Roles that may be bound to allow access to resources within that -// namespace. -func Setup(mgr ctrl.Manager, o controller.Options) error { - name := "rbac/namespace" - - r := NewReconciler(mgr, - WithLogger(o.Logger.WithValues("controller", name)), - WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name)))) - - return ctrl.NewControllerManagedBy(mgr). - Named(name). - For(&corev1.Namespace{}). - Owns(&rbacv1.Role{}). - Watches(&rbacv1.ClusterRole{}, &EnqueueRequestForNamespaces{client: mgr.GetClient()}). - WithOptions(o.ForControllerRuntime()). - Complete(ratelimiter.NewReconciler(name, errors.WithSilentRequeueOnConflict(r), o.GlobalRateLimiter)) -} - -// ReconcilerOption is used to configure the Reconciler. -type ReconcilerOption func(*Reconciler) - -// WithLogger specifies how the Reconciler should log messages. -func WithLogger(log logging.Logger) ReconcilerOption { - return func(r *Reconciler) { - r.log = log - } -} - -// WithRecorder specifies how the Reconciler should record Kubernetes events. -func WithRecorder(er event.Recorder) ReconcilerOption { - return func(r *Reconciler) { - r.record = er - } -} - -// WithClientApplicator specifies how the Reconciler should interact with the -// Kubernetes API. -func WithClientApplicator(ca resource.ClientApplicator) ReconcilerOption { - return func(r *Reconciler) { - r.client = ca - } -} - -// WithRoleRenderer specifies how the Reconciler should render RBAC -// Roles. -func WithRoleRenderer(rr RoleRenderer) ReconcilerOption { - return func(r *Reconciler) { - r.rbac = rr - } -} - -// NewReconciler returns a Reconciler of Namespaces. -func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - r := &Reconciler{ - // TODO(negz): Is Updating appropriate here? Probably. 
- client: resource.ClientApplicator{ - Client: mgr.GetClient(), - Applicator: resource.NewAPIUpdatingApplicator(mgr.GetClient()), - }, - - rbac: RoleRenderFn(RenderRoles), - - log: logging.NewNopLogger(), - record: event.NewNopRecorder(), - } - - for _, f := range opts { - f(r) - } - return r -} - -// A Reconciler reconciles Namespaces. -type Reconciler struct { - client resource.ClientApplicator - rbac RoleRenderer - - log logging.Logger - record event.Recorder -} - -// Reconcile a Namespace by creating a series of opinionated Roles that may be -// bound to allow access to resources within that namespace. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - log := r.log.WithValues("request", req) - log.Debug("Reconciling") - - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - ns := &corev1.Namespace{} - if err := r.client.Get(ctx, req.NamespacedName, ns); err != nil { - // In case object is not found, most likely the object was deleted and - // then disappeared while the event was in the processing queue. We - // don't need to take any action in that case. - log.Debug(errGetNamespace, "error", err) - return reconcile.Result{}, errors.Wrap(resource.IgnoreNotFound(err), errGetNamespace) - } - - log = log.WithValues( - "uid", ns.GetUID(), - "version", ns.GetResourceVersion(), - "name", ns.GetName(), - ) - - if meta.WasDeleted(ns) { - // There's nothing to do if our namespace is being deleted. Any Roles we - // created will be deleted along with the namespace. - return reconcile.Result{Requeue: false}, nil - } - - // NOTE(negz): We don't expect there to be an unwieldy amount of roles, so - // we just list and pass them all. We're listing from a cache that handles - // label selectors locally, so filtering with a label selector here won't - // meaningfully improve performance relative to filtering in RenderRoles. - // https://github.com/kubernetes-sigs/controller-runtime/blob/d6829e9/pkg/cache/internal/cache_reader.go#L131 - l := &rbacv1.ClusterRoleList{} - if err := r.client.List(ctx, l); err != nil { - if kerrors.IsConflict(err) { - return reconcile.Result{Requeue: true}, nil - } - err = errors.Wrap(err, errListRoles) - r.record.Event(ns, event.Warning(reasonApplyRoles, err)) - return reconcile.Result{}, err - } - - var applied []string //nolint:prealloc // We don't know how many roles we'll apply. - for _, rl := range r.rbac.RenderRoles(ns, l.Items) { - log := log.WithValues("role-name", rl.GetName()) - rl := rl // Pin range variable so we can take its address. - - err := r.client.Apply(ctx, &rl, resource.MustBeControllableBy(ns.GetUID()), resource.AllowUpdateIf(RolesDiffer)) - if resource.IsNotAllowed(err) { - log.Debug("Skipped no-op RBAC Role apply") - continue - } - if err != nil { - if kerrors.IsConflict(err) { - return reconcile.Result{Requeue: true}, nil - } - err = errors.Wrap(err, errApplyRole) - r.record.Event(ns, event.Warning(reasonApplyRoles, err)) - return reconcile.Result{}, err - } - - log.Debug("Applied RBAC Role") - applied = append(applied, rl.GetName()) - } - - if len(applied) > 0 { - r.record.Event(ns, event.Normal(reasonApplyRoles, fmt.Sprintf("Applied RBAC Roles: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, applied)))) - } - - return reconcile.Result{Requeue: false}, nil -} - -// RolesDiffer returns true if the supplied objects are different Roles. We -// consider Roles to be different if their crossplane annotations or rules do not match. 
-func RolesDiffer(current, desired runtime.Object) bool { - // Calling this with anything but Roles is a programming error. If it - // happens, we probably do want to panic. - c := current.(*rbacv1.Role) //nolint:forcetypeassert // See above. - d := desired.(*rbacv1.Role) //nolint:forcetypeassert // See above. - return !equalRolesAnnotations(c, d) || !cmp.Equal(c.Rules, d.Rules) -} - -// equalRolesAnnotations compares the crossplane rbac annotations (prefixed by "rbac.crossplane.io/") -// of two Roles and returns true if they are equal. -func equalRolesAnnotations(current, desired *rbacv1.Role) bool { - currentFiltered := make(map[string]string) - for k, v := range current.GetAnnotations() { - if strings.HasPrefix(k, keyPrefix) { - currentFiltered[k] = v - } - } - - desiredFiltered := make(map[string]string) - for k, v := range desired.GetAnnotations() { - if strings.HasPrefix(k, keyPrefix) { - desiredFiltered[k] = v - } - } - return cmp.Equal(currentFiltered, desiredFiltered) -} diff --git a/internal/controller/rbac/namespace/reconciler_test.go b/internal/controller/rbac/namespace/reconciler_test.go deleted file mode 100644 index 02c932c79..000000000 --- a/internal/controller/rbac/namespace/reconciler_test.go +++ /dev/null @@ -1,296 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package namespace - -import ( - "context" - "io" - "testing" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/logging" - "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" - "github.com/crossplane/crossplane-runtime/pkg/test" -) - -func TestReconcile(t *testing.T) { - errBoom := errors.New("boom") - testLog := logging.NewLogrLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(io.Discard)).WithName("testlog")) - now := metav1.Now() - - type args struct { - mgr manager.Manager - opts []ReconcilerOption - } - type want struct { - r reconcile.Result - err error - } - - cases := map[string]struct { - reason string - args args - want want - }{ - "NamespaceNotFound": { - reason: "We should not return an error if the Namespace was not found.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }, - }), - }, - }, - want: want{ - r: reconcile.Result{}, - }, - }, - "GetNamespaceError": { - reason: "We should return any other error encountered while getting a Namespace.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, - }), - }, - }, - want: want{ - err: errors.Wrap(errBoom, errGetNamespace), - }, - }, - "NamespaceDeleted": { - reason: "We should return early if the namespace was deleted.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - d := o.(*corev1.Namespace) - d.SetDeletionTimestamp(&now) - return nil - }), - }, - }), - }, - }, - want: want{ - r: reconcile.Result{Requeue: false}, - }, - }, - "ListClusterRolesError": { - reason: "We should return an error encountered listing ClusterRoles.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockList: test.NewMockListFn(errBoom), - }, - }), - }, - }, - want: want{ - err: errors.Wrap(errBoom, errListRoles), - }, - }, - "ApplyRoleError": { - reason: "We should return an error encountered applying a Role.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockList: test.NewMockListFn(nil), - }, - Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { - return errBoom - }), - }), - WithRoleRenderer(RoleRenderFn(func(*corev1.Namespace, []rbacv1.ClusterRole) []rbacv1.Role { - return []rbacv1.Role{{}} - })), - }, - }, - want: want{ - err: errors.Wrap(errBoom, errApplyRole), - }, - }, - "SuccessfulNoOp": { - 
reason: "We should not requeue when no Roles need applying.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockList: test.NewMockListFn(nil), - }, - Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, _ ...resource.ApplyOption) error { - // Simulate a no-op change by not allowing the update. - return resource.AllowUpdateIf(func(_, _ runtime.Object) bool { return false })(ctx, o, o) - }), - }), - WithRoleRenderer(RoleRenderFn(func(*corev1.Namespace, []rbacv1.ClusterRole) []rbacv1.Role { - return []rbacv1.Role{{}} - })), - }, - }, - want: want{ - r: reconcile.Result{Requeue: false}, - }, - }, - "SuccessfulApply": { - reason: "We should not requeue when we successfully apply our Roles.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockList: test.NewMockListFn(nil), - }, - Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { - return nil - }), - }), - WithRoleRenderer(RoleRenderFn(func(*corev1.Namespace, []rbacv1.ClusterRole) []rbacv1.Role { - return []rbacv1.Role{{}} - })), - }, - }, - want: want{ - r: reconcile.Result{Requeue: false}, - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, append(tc.args.opts, WithLogger(testLog))...) - got, err := r.Reconcile(context.Background(), reconcile.Request{}) - - if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { - t.Errorf("\n%s\nr.Reconcile(...): -want error, +got error:\n%s", tc.reason, diff) - } - if diff := cmp.Diff(tc.want.r, got, test.EquateErrors()); diff != "" { - t.Errorf("\n%s\nr.Reconcile(...): -want, +got:\n%s", tc.reason, diff) - } - }) - } -} - -func TestRolesDiffer(t *testing.T) { - cases := map[string]struct { - current runtime.Object - desired runtime.Object - want bool - }{ - "Equal": { - current: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": "a"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - desired: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": "a"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - want: false, - }, - "EqualMixedNonCrossplane": { - current: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": "a"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - desired: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "rbac.crossplane.io/a": "a", - "not-managed-by-crossplane/b": "b", - }, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - want: false, - }, - "AnnotationsDiffer": { - current: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": "a"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - desired: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/b": "b"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - want: true, - }, - "RulesDiffer": { - current: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": "a"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - desired: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": "a"}, - 
}, - }, - want: true, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - got := RolesDiffer(tc.current, tc.desired) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("RolesDiffer(...): -want, +got\n:%s", diff) - } - }) - } -} diff --git a/internal/controller/rbac/namespace/roles.go b/internal/controller/rbac/namespace/roles.go deleted file mode 100644 index dfb1133c0..000000000 --- a/internal/controller/rbac/namespace/roles.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICEE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIO OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "sort" - "strings" - - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/crossplane/crossplane-runtime/pkg/meta" -) - -const ( - nameAdmin = "crossplane-admin" - nameEdit = "crossplane-edit" - nameView = "crossplane-view" - - keyPrefix = "rbac.crossplane.io/" - - keyAggToAdmin = keyPrefix + "aggregate-to-ns-admin" - keyAggToEdit = keyPrefix + "aggregate-to-ns-edit" - keyAggToView = keyPrefix + "aggregate-to-ns-view" - - keyBaseOfAdmin = keyPrefix + "base-of-ns-admin" - keyBaseOfEdit = keyPrefix + "base-of-ns-edit" - keyBaseOfView = keyPrefix + "base-of-ns-view" - - keyXRD = keyPrefix + "xrd" - - keyAggregated = "aggregated-by-crossplane" - - valTrue = "true" - valAccept = "xrd-claim-accepted" -) - -// RenderRoles for the supplied namespace by aggregating rules from the supplied -// cluster roles. -func RenderRoles(ns *corev1.Namespace, crs []rbacv1.ClusterRole) []rbacv1.Role { - // Our list of CRs has no guaranteed order, so we sort them in order to - // ensure we don't reorder our RBAC rules on each update. 
- sort.Slice(crs, func(i, j int) bool { return crs[i].GetName() < crs[j].GetName() }) - - admin := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns.GetName(), - Name: nameAdmin, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - } - edit := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns.GetName(), - Name: nameEdit, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - } - view := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns.GetName(), - Name: nameView, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - } - - gvk := schema.GroupVersionKind{Version: "v1", Kind: "Namespace"} - meta.AddOwnerReference(admin, meta.AsController(meta.TypedReferenceTo(ns, gvk))) - meta.AddOwnerReference(edit, meta.AsController(meta.TypedReferenceTo(ns, gvk))) - meta.AddOwnerReference(view, meta.AsController(meta.TypedReferenceTo(ns, gvk))) - - accepts := map[string]bool{} - for k, v := range ns.GetAnnotations() { - if strings.HasPrefix(k, keyPrefix) && v == valAccept { - accepts[strings.TrimPrefix(k, keyPrefix)] = true - } - } - - acrs := crSelector{keyAggToAdmin, keyBaseOfAdmin, accepts} - ecrs := crSelector{keyAggToEdit, keyBaseOfEdit, accepts} - vcrs := crSelector{keyAggToView, keyBaseOfView, accepts} - - // TODO(negz): Annotate rendered Roles to indicate which ClusterRoles they - // are aggregating rules from? This aggregation is likely to be surprising - // to the uninitiated. - for _, cr := range crs { - if acrs.Select(cr) { - admin.Rules = append(admin.Rules, cr.Rules...) - } - - if ecrs.Select(cr) { - edit.Rules = append(edit.Rules, cr.Rules...) - } - - if vcrs.Select(cr) { - view.Rules = append(view.Rules, cr.Rules...) - } - } - - return []rbacv1.Role{*admin, *edit, *view} -} - -type crSelector struct { - keyAgg string - keyBase string - accepts map[string]bool -} - -func (s crSelector) Select(cr rbacv1.ClusterRole) bool { - l := cr.GetLabels() - - // All cluster roles must have an aggregation key to be selected. - if l[s.keyAgg] != valTrue { - return false - } - - // Cluster roles must either be the base of this role, or pertain to an XRD - // that this namespace accepts a claim from. - return l[s.keyBase] == valTrue || s.accepts[l[keyXRD]] -} diff --git a/internal/controller/rbac/namespace/roles_test.go b/internal/controller/rbac/namespace/roles_test.go deleted file mode 100644 index 991d15159..000000000 --- a/internal/controller/rbac/namespace/roles_test.go +++ /dev/null @@ -1,308 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package namespace - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -func TestCRSelector(t *testing.T) { - xrdName := "composites.example.org" - - type fields struct { - keyAgg string - keyBase string - accepts map[string]bool - } - - cases := map[string]struct { - reason string - fields fields - cr rbacv1.ClusterRole - want bool - }{ - "MissingAggregationLabel": { - reason: "Only ClusterRoles with the aggregation label should be selected", - fields: fields{ - keyAgg: keyAggToAdmin, - keyBase: keyBaseOfAdmin, - accepts: map[string]bool{xrdName: true}, - }, - cr: rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{ - keyBaseOfAdmin: valTrue, - }}}, - want: false, - }, - "OnlyAggregationLabel": { - reason: "ClusterRoles must have either the base label or the label of an accepted XRD to be selected", - fields: fields{ - keyAgg: keyAggToAdmin, - keyBase: keyBaseOfAdmin, - accepts: map[string]bool{xrdName: true}, - }, - cr: rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{ - keyAggToAdmin: valTrue, - }}}, - want: false, - }, - "IsBaseRole": { - reason: "ClusterRoles with the aggregation and base labels should be selected", - fields: fields{ - keyAgg: keyAggToAdmin, - keyBase: keyBaseOfAdmin, - accepts: map[string]bool{xrdName: true}, - }, - cr: rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyBaseOfAdmin: valTrue, - }}}, - want: true, - }, - "IsAcceptedXRDRole": { - reason: "ClusterRoles with the aggregation and an accepted XRD label should be selected", - fields: fields{ - keyAgg: keyAggToAdmin, - keyBase: keyBaseOfAdmin, - accepts: map[string]bool{xrdName: true}, - }, - cr: rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyXRD: xrdName, - }}}, - want: true, - }, - "IsUnknownXRDRole": { - reason: "ClusterRoles with the aggregation label but an unknown XRD label should be ignored", - fields: fields{ - keyAgg: keyAggToAdmin, - keyBase: keyBaseOfAdmin, - accepts: map[string]bool{xrdName: true}, - }, - cr: rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyXRD: "unknown.example.org", // An XRD we don't accept. 
- }}}, - want: false, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - crs := crSelector{tc.fields.keyAgg, tc.fields.keyBase, tc.fields.accepts} - got := crs.Select(tc.cr) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("crs.Select(...): -want, +got:\n%s\n", diff) - } - }) - } -} - -func TestRenderClusterRoles(t *testing.T) { - name := "spacename" - uid := types.UID("no-you-id") - - ctrl := true - owner := metav1.OwnerReference{ - APIVersion: "v1", - Kind: "Namespace", - Name: name, - UID: uid, - Controller: &ctrl, - BlockOwnerDeletion: &ctrl, - } - - crNameA := "A" - crNameB := "B" - crNameC := "C" - - ruleA := rbacv1.PolicyRule{APIGroups: []string{"A"}} - ruleB := rbacv1.PolicyRule{APIGroups: []string{"B"}} - ruleC := rbacv1.PolicyRule{APIGroups: []string{"C"}} - - xrdName := "guilty-gear-xrd" - - type args struct { - ns *corev1.Namespace - crs []rbacv1.ClusterRole - } - - cases := map[string]struct { - reason string - args args - want []rbacv1.Role - }{ - "APlainOldNamespace": { - reason: "A namespace with no annotations should get admin, edit, and view roles with only base rules, if any exist.", - args: args{ - ns: &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name, UID: uid}}, - crs: []rbacv1.ClusterRole{ - { - // This role's rules should be aggregated to the admin role. - ObjectMeta: metav1.ObjectMeta{ - Name: crNameA, - Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyBaseOfAdmin: valTrue, - }, - }, - Rules: []rbacv1.PolicyRule{ruleA}, - }, - { - // This role's rules should also be aggregated to the admin role. - ObjectMeta: metav1.ObjectMeta{ - Name: crNameB, - Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyBaseOfAdmin: valTrue, - }, - }, - Rules: []rbacv1.PolicyRule{ruleB}, - }, - { - // This role doesn't have any interesting labels. It should not be aggregated. - ObjectMeta: metav1.ObjectMeta{ - Name: crNameC, - Labels: map[string]string{}, - }, - Rules: []rbacv1.PolicyRule{ruleC}, - }, - }, - }, - want: []rbacv1.Role{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameAdmin, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - Rules: []rbacv1.PolicyRule{ruleA, ruleB}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameEdit, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameView, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - }, - }, - }, - "ANamespaceThatAcceptsClaims": { - reason: "A namespace that is annotated to accept claims should get admin, edit, and view roles with base and XRD rules, if they exist.", - args: args{ - ns: &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - UID: uid, - Annotations: map[string]string{keyPrefix + xrdName: valAccept}, - }, - }, - crs: []rbacv1.ClusterRole{ - { - // This role's rules should be aggregated to the admin and edit roles. - ObjectMeta: metav1.ObjectMeta{ - Name: crNameA, - Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyBaseOfAdmin: valTrue, - keyAggToEdit: valTrue, - keyBaseOfEdit: valTrue, - }, - }, - Rules: []rbacv1.PolicyRule{ruleA}, - }, - { - // This role's rules should also be aggregated to the admin and edit roles. 
- ObjectMeta: metav1.ObjectMeta{ - Name: crNameB, - Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyAggToEdit: valTrue, - keyXRD: xrdName, // The namespace accepts the claim this XRD offers. - }, - }, - Rules: []rbacv1.PolicyRule{ruleB}, - }, - { - // This role's rules should be aggregated to the view role. - ObjectMeta: metav1.ObjectMeta{ - Name: crNameC, - Labels: map[string]string{ - keyAggToView: valTrue, - keyXRD: xrdName, // The namespace accepts the claim this XRD offers. - }, - }, - Rules: []rbacv1.PolicyRule{ruleC}, - }, - }, - }, - want: []rbacv1.Role{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameAdmin, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - Rules: []rbacv1.PolicyRule{ruleA, ruleB}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameEdit, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - Rules: []rbacv1.PolicyRule{ruleA, ruleB}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameView, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - Rules: []rbacv1.PolicyRule{ruleC}, - }, - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - got := RenderRoles(tc.args.ns, tc.args.crs) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("\n%s\nRenderRoles(...): -want, +got:\n%s\n", tc.reason, diff) - } - }) - } -} diff --git a/internal/controller/rbac/namespace/watch.go b/internal/controller/rbac/namespace/watch.go deleted file mode 100644 index b9b42cd65..000000000 --- a/internal/controller/rbac/namespace/watch.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "context" - "strings" - - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -type adder interface { - Add(item any) -} - -// EnqueueRequestForNamespaces enqueues a reconcile for all namespaces whenever -// a ClusterRole with the aggregation labels we're concerned with changes. This -// is unusual, but we expect there to be relatively few ClusterRoles, and we -// have no way of relating a specific ClusterRoles back to the Roles that -// aggregate it. This is the approach the upstream aggregation controller uses. 
-// https://github.com/kubernetes/kubernetes/blob/323f348/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go#L188 -type EnqueueRequestForNamespaces struct { - client client.Reader -} - -// Create adds a NamespacedName for the supplied CreateEvent if its Object is an -// aggregated ClusterRole. -func (e *EnqueueRequestForNamespaces) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { - e.add(ctx, evt.Object, q) -} - -// Update adds a NamespacedName for the supplied UpdateEvent if its Object is an -// aggregated ClusterRole. -func (e *EnqueueRequestForNamespaces) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - e.add(ctx, evt.ObjectOld, q) - e.add(ctx, evt.ObjectNew, q) -} - -// Delete adds a NamespacedName for the supplied DeleteEvent if its Object is an -// aggregated ClusterRole. -func (e *EnqueueRequestForNamespaces) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { - e.add(ctx, evt.Object, q) -} - -// Generic adds a NamespacedName for the supplied GenericEvent if its Object is -// an aggregated ClusterRole. -func (e *EnqueueRequestForNamespaces) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { - e.add(ctx, evt.Object, q) -} - -func (e *EnqueueRequestForNamespaces) add(ctx context.Context, obj runtime.Object, queue adder) { - cr, ok := obj.(*rbacv1.ClusterRole) - if !ok { - return - } - - if !aggregates(cr) { - return - } - - l := &corev1.NamespaceList{} - if err := e.client.List(ctx, l); err != nil { - return - } - - for _, ns := range l.Items { - queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.GetName()}}) - } -} - -func aggregates(obj metav1.Object) bool { - for k := range obj.GetLabels() { - if strings.HasPrefix(k, keyPrefix) { - return true - } - } - return false -} diff --git a/internal/controller/rbac/namespace/watch_test.go b/internal/controller/rbac/namespace/watch_test.go deleted file mode 100644 index 9aea56cea..000000000 --- a/internal/controller/rbac/namespace/watch_test.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package namespace - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/test" -) - -var _ handler.EventHandler = &EnqueueRequestForNamespaces{} - -type addFn func(item any) - -func (fn addFn) Add(item any) { - fn(item) -} - -func TestAdd(t *testing.T) { - name := "coolname" - - cases := map[string]struct { - client client.Reader - ctx context.Context - obj runtime.Object - queue adder - }{ - "ObjectIsNotAClusterRole": { - queue: addFn(func(_ any) { t.Errorf("queue.Add() called unexpectedly") }), - }, - "ClusterRoleIsNotAggregated": { - obj: &rbacv1.ClusterRole{}, - queue: addFn(func(_ any) { t.Errorf("queue.Add() called unexpectedly") }), - }, - "ListNamespacesError": { - client: &test.MockClient{ - MockList: test.NewMockListFn(errors.New("boom")), - }, - obj: &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{keyAggToAdmin: valTrue}}}, - queue: addFn(func(_ any) { t.Errorf("queue.Add() called unexpectedly") }), - }, - "SuccessfulEnqueue": { - client: &test.MockClient{ - MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { - nsl := o.(*corev1.NamespaceList) - *nsl = corev1.NamespaceList{Items: []corev1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: name}}}} - return nil - }), - }, - obj: &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{keyAggToAdmin: valTrue}}}, - queue: addFn(func(got any) { - want := reconcile.Request{NamespacedName: types.NamespacedName{Name: name}} - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("-want, +got:\n%s\n", diff) - } - }), - }, - } - - for _, tc := range cases { - e := &EnqueueRequestForNamespaces{client: tc.client} - e.add(tc.ctx, tc.obj, tc.queue) - } -} diff --git a/internal/controller/rbac/rbac.go b/internal/controller/rbac/rbac.go index ce6922cc3..3e73aca95 100644 --- a/internal/controller/rbac/rbac.go +++ b/internal/controller/rbac/rbac.go @@ -22,7 +22,6 @@ import ( "github.com/crossplane/crossplane/internal/controller/rbac/controller" "github.com/crossplane/crossplane/internal/controller/rbac/definition" - "github.com/crossplane/crossplane/internal/controller/rbac/namespace" "github.com/crossplane/crossplane/internal/controller/rbac/provider/binding" "github.com/crossplane/crossplane/internal/controller/rbac/provider/roles" ) @@ -38,10 +37,5 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { return err } } - - if o.ManagementPolicy != controller.ManagementPolicyAll { - return nil - } - - return namespace.Setup(mgr, o) + return nil } From b7fd0bd324abc555e933d175a782fdaaf2721f2d Mon Sep 17 00:00:00 2001 From: Seth Duke <33556724+sethfduke@users.noreply.github.com> Date: Wed, 8 May 2024 08:30:44 -0400 Subject: [PATCH 165/370] Add Hyland Software to Adopters Signed-off-by: Seth Duke <33556724+sethfduke@users.noreply.github.com> --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index cf982aa11..81eceb8df 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -85,3 +85,4 @@ This list is sorted in the order that organizations were added to it. 
| [UiPath](https://www.uipath.com/) | [@mjnovice](https://github.com/mjnovice) | Control plane for infrastructure management which powers [AutomationSuite](https://docs.uipath.com/automation-suite/automation-suite/2023.10/installation-guide-eks-aks/automation-suite-on-eksaks-overview) | | [SpareBank 1 Utvikling](https://sparebank1.dev/) | [@chlunde](https://github.com/chlunde) | Crossplane powers our Internal Developer Platform. It is utilized for day-to-day operations via GitOps and enabled us to execute a large-scale self-service migration of over a thousand production microservices, databases and caches from on-premises to EKS and managed AWS services. | | [Veset](https://veset.tv/) | [@pblgomez](https://github.com/pblgomez) | At Veset we are deploying all our backend resources in production environments to be managed by Crossplane. | +| [Hyland Software](https://www.hyland.com/) | [@sethfduke](mailto:seth.duke@hyland.com) | Hyland is utilizing Crossplane to simplify the infrastructure provisioning process for internal development teams, providing a simple means of creating resources, while maintaining control over compliance, security, and best-practices of these resources through a suite of Compositions. | From ba00579ba96707ef8a00b9f6208577d196565c36 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 8 May 2024 22:52:08 -0700 Subject: [PATCH 166/370] Fix locking in the PackagedFunctionRunner The runner was relying on potentially stale state. A race could cause Goroutine A's state to become outdated if Goroutine B updated it between when Goroutine A released its read lock and took a write lock. Signed-off-by: Nic Cope --- internal/xfn/function_runner.go | 54 +++++++++++++++++---------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/internal/xfn/function_runner.go b/internal/xfn/function_runner.go index 3a55331e0..762c41b56 100644 --- a/internal/xfn/function_runner.go +++ b/internal/xfn/function_runner.go @@ -187,12 +187,24 @@ func (r *PackagedFunctionRunner) getClientConn(ctx context.Context, name string) return nil, errors.Errorf(errFmtEmptyEndpoint, active.GetName()) } + // If we have a connection for the up-to-date endpoint, return it. r.connsMx.RLock() conn, ok := r.conns[name] + if ok && conn.Target() == active.Status.Endpoint { + defer r.connsMx.RUnlock() + return conn, nil + } r.connsMx.RUnlock() + // Either we didn't have a connection, or it wasn't up-to-date. + r.connsMx.Lock() + defer r.connsMx.Unlock() + + // Another Goroutine might have updated the connections between when we + // released the read lock and took the write lock, so check again. + conn, ok = r.conns[name] if ok { - // We have a connection for the up-to-date endpoint. Return it. + // We now have a connection for the up-to-date endpoint. if conn.Target() == active.Status.Endpoint { return conn, nil } @@ -202,6 +214,7 @@ func (r *PackagedFunctionRunner) getClientConn(ctx context.Context, name string) // already closed or in the process of closing. log.Debug("Closing gRPC client connection with stale target", "old-target", conn.Target(), "new-target", active.Status.Endpoint) _ = conn.Close() + delete(r.conns, name) } // This context is only used for setting up the connection. 
@@ -221,9 +234,7 @@ func (r *PackagedFunctionRunner) getClientConn(ctx context.Context, name string) return nil, errors.Wrapf(err, errFmtDialFunction, active.Status.Endpoint, active.GetName()) } - r.connsMx.Lock() r.conns[name] = conn - r.connsMx.Unlock() log.Debug("Created new gRPC client connection", "target", active.Status.Endpoint) return conn, nil @@ -258,17 +269,16 @@ func (r *PackagedFunctionRunner) GarbageCollectConnectionsNow(ctx context.Contex // path where no connections need garbage collecting we shouldn't // take it at all. + // No need to take a write lock or list Functions if there's no work to do. r.connsMx.RLock() - connections := make([]string, 0, len(r.conns)) - for name := range r.conns { - connections = append(connections, name) + if len(r.conns) == 0 { + defer r.connsMx.RUnlock() + return 0, nil } r.connsMx.RUnlock() - // No need to list Functions if there's no work to do. - if len(connections) == 0 { - return 0, nil - } + r.connsMx.Lock() + defer r.connsMx.Unlock() l := &pkgv1beta1.FunctionList{} if err := r.client.List(ctx, l); err != nil { @@ -280,28 +290,20 @@ func (r *PackagedFunctionRunner) GarbageCollectConnectionsNow(ctx context.Contex functionExists[f.GetName()] = true } - // Build a list of connections to garbage collect. - gc := make([]string, 0) - for _, name := range connections { - if !functionExists[name] { - gc = append(gc, name) + // Garbage collect connections. + closed := 0 + for name := range r.conns { + if functionExists[name] { + continue } - } - // No need to take a write lock if there's no work to do. - if len(gc) == 0 { - return 0, nil - } - - r.log.Debug("Closing gRPC client connections for Functions that are no longer installed", "functions", gc) - r.connsMx.Lock() - for _, name := range gc { // Close only returns an error is if the connection is already // closed or in the process of closing. 
_ = r.conns[name].Close() delete(r.conns, name) + closed++ + r.log.Debug("Closed gRPC client connection to Function that is no longer installed", "function", name) } - r.connsMx.Unlock() - return len(gc), nil + return closed, nil } From fa7c12b4da33548ee09c5badea1058487477c8a0 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Thu, 9 May 2024 12:14:31 +0300 Subject: [PATCH 167/370] Include composite/claim status permissions to managed roles Signed-off-by: Hasan Turken --- internal/controller/rbac/definition/roles.go | 35 +++++++++++++------ .../controller/rbac/definition/roles_test.go | 16 ++++----- 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/internal/controller/rbac/definition/roles.go b/internal/controller/rbac/definition/roles.go index 8de8dbbb0..8e5ee29eb 100644 --- a/internal/controller/rbac/definition/roles.go +++ b/internal/controller/rbac/definition/roles.go @@ -111,8 +111,11 @@ func RenderClusterRoles(d *v1.CompositeResourceDefinition) []rbacv1.ClusterRole Rules: []rbacv1.PolicyRule{ { APIGroups: []string{d.Spec.Group}, - Resources: []string{d.Spec.Names.Plural}, - Verbs: verbsEdit, + Resources: []string{ + d.Spec.Names.Plural, + d.Spec.Names.Plural + suffixStatus, + }, + Verbs: verbsEdit, }, }, } @@ -130,8 +133,11 @@ func RenderClusterRoles(d *v1.CompositeResourceDefinition) []rbacv1.ClusterRole Rules: []rbacv1.PolicyRule{ { APIGroups: []string{d.Spec.Group}, - Resources: []string{d.Spec.Names.Plural}, - Verbs: verbsView, + Resources: []string{ + d.Spec.Names.Plural, + d.Spec.Names.Plural + suffixStatus, + }, + Verbs: verbsView, }, }, } @@ -148,8 +154,11 @@ func RenderClusterRoles(d *v1.CompositeResourceDefinition) []rbacv1.ClusterRole Rules: []rbacv1.PolicyRule{ { APIGroups: []string{d.Spec.Group}, - Resources: []string{d.Spec.Names.Plural}, - Verbs: verbsBrowse, + Resources: []string{ + d.Spec.Names.Plural, + d.Spec.Names.Plural + suffixStatus, + }, + Verbs: verbsBrowse, }, }, } @@ -176,14 +185,20 @@ func RenderClusterRoles(d *v1.CompositeResourceDefinition) []rbacv1.ClusterRole edit.Rules = append(edit.Rules, rbacv1.PolicyRule{ APIGroups: []string{d.Spec.Group}, - Resources: []string{d.Spec.ClaimNames.Plural}, - Verbs: verbsEdit, + Resources: []string{ + d.Spec.ClaimNames.Plural, + d.Spec.ClaimNames.Plural + suffixStatus, + }, + Verbs: verbsEdit, }) view.Rules = append(view.Rules, rbacv1.PolicyRule{ APIGroups: []string{d.Spec.Group}, - Resources: []string{d.Spec.ClaimNames.Plural}, - Verbs: verbsView, + Resources: []string{ + d.Spec.ClaimNames.Plural, + d.Spec.ClaimNames.Plural + suffixStatus, + }, + Verbs: verbsView, }) // The browse role only includes composite resources; not claims. 
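The hunks above append the status subresource alongside each composite and claim resource in the managed edit, view, and browse ClusterRoles. A minimal sketch of the kind of rule this renders, assuming `suffixStatus` (defined elsewhere in this package, not shown in the hunk) is the usual `/status` subresource suffix, and using a hypothetical `example.org` XRD with plural `compositewidgets`:

```go
// Illustrative sketch only; not part of this patch. Assumes suffixStatus is
// the "/status" subresource suffix and uses a made-up XRD group and plural.
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	const suffixStatus = "/status" // assumed value; defined elsewhere in the real package

	// Shape of a rule rendered for a view-style role after this change:
	// the status subresource is listed next to the main resource.
	rule := rbacv1.PolicyRule{
		APIGroups: []string{"example.org"},
		Resources: []string{"compositewidgets", "compositewidgets" + suffixStatus},
		Verbs:     []string{"get", "list", "watch"},
	}
	fmt.Printf("%+v\n", rule)
}
```

Listing the subresource is what allows subjects bound to these aggregated roles to read (and, for the edit role, write) the `.status` of composites and claims, not only their main resource.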
diff --git a/internal/controller/rbac/definition/roles_test.go b/internal/controller/rbac/definition/roles_test.go index 645071e7f..bc80477e4 100644 --- a/internal/controller/rbac/definition/roles_test.go +++ b/internal/controller/rbac/definition/roles_test.go @@ -96,7 +96,7 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsEdit, }, }, @@ -114,7 +114,7 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsView, }, }, @@ -131,7 +131,7 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsBrowse, }, }, @@ -195,12 +195,12 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsEdit, }, { APIGroups: []string{group}, - Resources: []string{pluralXRC}, + Resources: []string{pluralXRC, pluralXRC + suffixStatus}, Verbs: verbsEdit, }, }, @@ -218,12 +218,12 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsView, }, { APIGroups: []string{group}, - Resources: []string{pluralXRC}, + Resources: []string{pluralXRC, pluralXRC + suffixStatus}, Verbs: verbsView, }, }, @@ -241,7 +241,7 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsBrowse, }, }, From ee1b5a4eeb8235f3268b493e9c49bc6ba8a39c1c Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 9 May 2024 15:13:27 -0700 Subject: [PATCH 168/370] Publish E2E test flakes to Buildpulse This generates JUnit files that are used to detect flaky tests. It uses gotestsum to generate the files. As part of this change I've made gotestsum lower the verbosity of E2E tests - I believe we'll only get a detailed summary of test failures. 
Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 10 ++++++++++ Makefile | 22 +++++++++++----------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c7c05fa56..8bf0236d9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -312,6 +312,16 @@ jobs: path: ./logs-kind if-no-files-found: error retention-days: 7 + + - name: Publish E2E Test Flakes + if: '!cancelled()' + uses: buildpulse/buildpulse-action@v0.11.0 + with: + account: 45158470 + repository: 147886080 + path: _output/tests/linux_amd64/e2e-tests.xml + key: ${{ secrets.BUILDPULSE_ACCESS_KEY_ID }} + secret: ${{ secrets.BUILDPULSE_SECRET_ACCESS_KEY }} publish-artifacts: runs-on: ubuntu-22.04 diff --git a/Makefile b/Makefile index f852c8a92..fbc82633d 100644 --- a/Makefile +++ b/Makefile @@ -111,13 +111,6 @@ gen-kustomize-crds: ; done @$(OK) All CRDs added to Kustomize file for local development -# Generate a coverage report for cobertura applying exclusions on -# - generated file -cobertura: - @cat $(GO_TEST_OUTPUT)/coverage.txt | \ - grep -v zz_generated.deepcopy | \ - $(GOCOVER_COBERTURA) > $(GO_TEST_OUTPUT)/cobertura-coverage.xml - e2e-tag-images: @$(INFO) Tagging E2E test images @docker tag $(BUILD_REGISTRY)/$(PROJECT_NAME)-$(TARGETARCH) crossplane-e2e/$(PROJECT_NAME):latest || $(FAIL) @@ -131,17 +124,25 @@ E2E_TEST_FLAGS ?= # https://github.com/kubernetes-sigs/e2e-framework/issues/282 E2E_PATH = $(WORK_DIR)/e2e +GOTESTSUM_VERSION ?= v1.11.0 +GOTESTSUM := $(TOOLS_HOST_DIR)/gotestsum + +$(GOTESTSUM): + @$(INFO) installing gotestsum + @GOBIN=$(TOOLS_HOST_DIR) $(GOHOST) install gotest.tools/gotestsum@$(GOTESTSUM_VERSION) || $(FAIL) + @$(OK) installed gotestsum + e2e-run-tests: @$(INFO) Run E2E tests @mkdir -p $(E2E_PATH) @ln -sf $(KIND) $(E2E_PATH)/kind @ln -sf $(HELM) $(E2E_PATH)/helm - @PATH="$(E2E_PATH):${PATH}" $(GO_TEST_OUTPUT)/e2e $(E2E_TEST_FLAGS) || $(FAIL) + @PATH="$(E2E_PATH):${PATH}" $(GOTESTSUM) --format testname --junitfile $(GO_TEST_OUTPUT)/e2e-tests.xml --raw-command -- $(GO) tool test2json -t -p e2e $(GO_TEST_OUTPUT)/e2e -test.v $(E2E_TEST_FLAGS) || $(FAIL) @$(OK) Run E2E tests e2e.init: build e2e-tag-images -e2e.run: $(KIND) $(HELM3) e2e-run-tests +e2e.run: $(GOTESTSUM) $(KIND) $(HELM3) e2e-run-tests # Update the submodules, such as the common build scripts. submodules: @@ -173,14 +174,13 @@ run: go.build @# To see other arguments that can be provided, run the command with --help instead $(GO_OUT_DIR)/$(PROJECT_NAME) core start --debug -.PHONY: manifests cobertura submodules fallthrough test-integration run install-crds uninstall-crds gen-kustomize-crds e2e-tests-compile e2e.test.images +.PHONY: manifests submodules fallthrough test-integration run install-crds uninstall-crds gen-kustomize-crds e2e-tests-compile e2e.test.images # ==================================================================================== # Special Targets define CROSSPLANE_MAKE_HELP Crossplane Targets: - cobertura Generate a coverage report for cobertura applying exclusions on generated files. submodules Update the submodules, such as the common build scripts. run Run crossplane locally, out-of-cluster. Useful for development. From ef4897b4e9c41812712ab0c97b1fe28f666a846b Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 9 May 2024 15:17:28 -0700 Subject: [PATCH 169/370] Pass the codecov token Without this token we're being severely rate limited when uploading coverage to Codecov. 
Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8bf0236d9..044c1046e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -233,6 +233,7 @@ jobs: with: flags: unittests file: _output/tests/linux_amd64/coverage.txt + token: ${{ secrets.CODECOV_TOKEN }} e2e-tests: runs-on: ubuntu-22.04 From c9686016f1d5120b04df43b8c66403a93b56dcd8 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 9 May 2024 15:40:32 -0700 Subject: [PATCH 170/370] Bump Codecov to v4 Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 044c1046e..c800a54b4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -229,7 +229,7 @@ jobs: run: make -j2 test - name: Publish Unit Test Coverage - uses: codecov/codecov-action@ab904c41d6ece82784817410c45d8b8c02684457 # v3 + uses: codecov/codecov-action@v4 with: flags: unittests file: _output/tests/linux_amd64/coverage.txt @@ -320,9 +320,9 @@ jobs: with: account: 45158470 repository: 147886080 - path: _output/tests/linux_amd64/e2e-tests.xml key: ${{ secrets.BUILDPULSE_ACCESS_KEY_ID }} secret: ${{ secrets.BUILDPULSE_SECRET_ACCESS_KEY }} + path: _output/tests/linux_amd64/e2e-tests.xml publish-artifacts: runs-on: ubuntu-22.04 From 2acb65e4b561f5f9a1f1e99d8f6231a082eefff6 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 10 May 2024 15:18:52 +0100 Subject: [PATCH 171/370] tests: AllOf failing immediately if fail-fast Signed-off-by: Philippe Scorsolini --- test/e2e/funcs/feature.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go index da73f7d5d..ab96525ad 100644 --- a/test/e2e/funcs/feature.go +++ b/test/e2e/funcs/feature.go @@ -66,13 +66,18 @@ const DefaultPollInterval = time.Millisecond * 500 type onSuccessHandler func(o k8s.Object) -// AllOf runs the supplied functions in order. +// AllOf runs the supplied functions in order. If a function fails the test and +// the environment is configured to fail fast (e2e-framework's -fail-fast flag) +// the remaining functions will not be run. func AllOf(fns ...features.Func) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { t.Helper() for _, fn := range fns { ctx = fn(ctx, t, c) + if t.Failed() && c.FailFast() { + break + } } return ctx } From 0ded6a1bef640175d8b0d1c8ac4f681e94a82ba9 Mon Sep 17 00:00:00 2001 From: Brandon Powers Date: Fri, 10 May 2024 06:51:34 -0400 Subject: [PATCH 172/370] Add Skillsoft to `ADOPTERS.md` Signed-off-by: Brandon Powers --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 81eceb8df..857a5ecb5 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -86,3 +86,4 @@ This list is sorted in the order that organizations were added to it. | [SpareBank 1 Utvikling](https://sparebank1.dev/) | [@chlunde](https://github.com/chlunde) | Crossplane powers our Internal Developer Platform. It is utilized for day-to-day operations via GitOps and enabled us to execute a large-scale self-service migration of over a thousand production microservices, databases and caches from on-premises to EKS and managed AWS services. 
| | [Veset](https://veset.tv/) | [@pblgomez](https://github.com/pblgomez) | At Veset we are deploying all our backend resources in production environments to be managed by Crossplane. | | [Hyland Software](https://www.hyland.com/) | [@sethfduke](mailto:seth.duke@hyland.com) | Hyland is utilizing Crossplane to simplify the infrastructure provisioning process for internal development teams, providing a simple means of creating resources, while maintaining control over compliance, security, and best-practices of these resources through a suite of Compositions. | +| [Skillsoft](https://www.skillsoft.com/) | [@brandon-powers](https://github.com/brandon-powers) | At Skillsoft, Crossplane automates the provisioning and management of our AWS infrastructure (S3, Athena, and Glue) to support core Apache Kafka services powering our online learning platform, [Percipio](https://www.skillsoft.com/meet-skillsoft-percipio), in production environments. | From d187f6a31a3fda5f23b5a061e7f5e8ebef3808b6 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sat, 11 May 2024 08:08:03 +0000 Subject: [PATCH 173/370] chore(deps): pin dependencies --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c800a54b4..71c320fd3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -229,7 +229,7 @@ jobs: run: make -j2 test - name: Publish Unit Test Coverage - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be # v4 with: flags: unittests file: _output/tests/linux_amd64/coverage.txt @@ -316,7 +316,7 @@ jobs: - name: Publish E2E Test Flakes if: '!cancelled()' - uses: buildpulse/buildpulse-action@v0.11.0 + uses: buildpulse/buildpulse-action@d0d30f53585cf16b2e01811a5a753fd47968654a # v0.11.0 with: account: 45158470 repository: 147886080 From 43ff58985a2bf84300664577078237655c449719 Mon Sep 17 00:00:00 2001 From: akhyaradn Date: Sun, 12 May 2024 02:02:49 +0800 Subject: [PATCH 174/370] correcting help tag Signed-off-by: akhyaradn --- cmd/crossplane/core/core.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crossplane/core/core.go b/cmd/crossplane/core/core.go index d82dd5522..0a5545fed 100644 --- a/cmd/crossplane/core/core.go +++ b/cmd/crossplane/core/core.go @@ -91,7 +91,7 @@ type startCommand struct { CABundlePath string `env:"CA_BUNDLE_PATH" help:"Additional CA bundle to use when fetching packages from registry."` UserAgent string `default:"${default_user_agent}" env:"USER_AGENT" help:"The User-Agent header that will be set on all package requests."` - PackageRuntime string `default:"Deployment" env:"PACKAGE_RUNTIME" helm:"The package runtime to use for packages with a runtime (e.g. Providers and Functions)"` + PackageRuntime string `default:"Deployment" env:"PACKAGE_RUNTIME" help:"The package runtime to use for packages with a runtime (e.g. Providers and Functions)"` SyncInterval time.Duration `default:"1h" help:"How often all resources will be double-checked for drift from the desired state." 
short:"s"` PollInterval time.Duration `default:"1m" help:"How often individual resources will be checked for drift from the desired state."` From 4c8148bd12cb98c419250c6bbbfd7bb61efe93b2 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sun, 12 May 2024 08:09:06 +0000 Subject: [PATCH 175/370] chore(deps): update actions/checkout digest to 0ad4b8f --- .github/workflows/backport.yml | 2 +- .github/workflows/ci.yml | 16 ++++++++-------- .github/workflows/commands.yml | 2 +- .github/workflows/promote.yml | 2 +- .github/workflows/scan.yaml | 2 +- .github/workflows/tag.yml | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 7f311959e..4ffdd8f46 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -22,7 +22,7 @@ jobs: if: github.event.pull_request.merged steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: fetch-depth: 0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 71c320fd3..c923fa93c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,7 +27,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: submodules: true @@ -81,7 +81,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: submodules: true @@ -127,7 +127,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: submodules: true @@ -171,7 +171,7 @@ jobs: if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: submodules: true @@ -192,7 +192,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: submodules: true @@ -261,7 +261,7 @@ jobs: install: true - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: submodules: true @@ -352,7 +352,7 @@ jobs: install: true - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: submodules: true @@ -472,7 +472,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 - name: Setup Buf uses: bufbuild/buf-setup-action@v1 diff --git a/.github/workflows/commands.yml b/.github/workflows/commands.yml index 0232d5760..9cb34aa1f 100644 --- a/.github/workflows/commands.yml +++ b/.github/workflows/commands.yml @@ -21,7 +21,7 @@ jobs: permission-level: write - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: fetch-depth: 0 diff --git 
a/.github/workflows/promote.yml b/.github/workflows/promote.yml index 34ba83962..685c53336 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -36,7 +36,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: submodules: true diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index dbab23e31..790ba857c 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -17,7 +17,7 @@ jobs: supported_releases: ${{ steps.get-releases.outputs.supported_releases }} steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: fetch-depth: 0 diff --git a/.github/workflows/tag.yml b/.github/workflows/tag.yml index 6f12ffd7c..cb730a0bb 100644 --- a/.github/workflows/tag.yml +++ b/.github/workflows/tag.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 - name: Create Tag uses: negz/create-tag@39bae1e0932567a58c20dea5a1a0d18358503320 # v1 From bafa969d1bbe7c9329cee2bc52d526ffb40445d8 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 08:09:45 +0000 Subject: [PATCH 176/370] chore(deps): update actions/create-github-app-token digest to a0de6af --- .github/workflows/renovate.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index e54b14bcf..013f0ca42 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -32,7 +32,7 @@ jobs: - name: Get token id: get-github-app-token - uses: actions/create-github-app-token@7bfa3a4717ef143a604ee0a99d859b8886a96d00 # v1 + uses: actions/create-github-app-token@a0de6af83968303c8c955486bf9739a57d23c7f1 # v1 with: app-id: ${{ secrets.RENOVATE_GITHUB_APP_ID }} private-key: ${{ secrets.RENOVATE_GITHUB_APP_PRIVATE_KEY }} From 260414fe8aea6560d2f4e4227d4ebfd2eaf276d6 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 11:16:49 +0000 Subject: [PATCH 177/370] chore(deps): update actions/setup-go digest to cdcb360 --- .github/workflows/ci.yml | 12 ++++++------ .github/workflows/promote.yml | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c923fa93c..a06bc14d5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,7 +32,7 @@ jobs: submodules: true - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 with: go-version: ${{ env.GO_VERSION }} @@ -86,7 +86,7 @@ jobs: submodules: true - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 with: go-version: ${{ env.GO_VERSION }} @@ -132,7 +132,7 @@ jobs: submodules: true - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 with: go-version: ${{ env.GO_VERSION }} @@ -200,7 +200,7 @@ jobs: run: git fetch --prune 
--unshallow - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 with: go-version: ${{ env.GO_VERSION }} @@ -269,7 +269,7 @@ jobs: run: git fetch --prune --unshallow - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 with: go-version: ${{ env.GO_VERSION }} @@ -360,7 +360,7 @@ jobs: run: git fetch --prune --unshallow - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 with: go-version: ${{ env.GO_VERSION }} diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index 685c53336..bcea77f88 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -41,7 +41,7 @@ jobs: submodules: true - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 with: go-version: ${{ env.GO_VERSION }} From d924aef9ec335aa9662bfb035fd4772ec80b820c Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 11:17:14 +0000 Subject: [PATCH 178/370] chore(deps): update golang version to v1.22.3 --- .github/workflows/ci.yml | 2 +- .github/workflows/promote.yml | 2 +- go.mod | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c923fa93c..0e2e81291 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,7 +10,7 @@ on: env: # Common versions - GO_VERSION: '1.22.2' + GO_VERSION: '1.22.3' GOLANGCI_VERSION: 'v1.57.2' DOCKER_BUILDX_VERSION: 'v0.10.0' diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index 685c53336..6b37ace75 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -21,7 +21,7 @@ on: env: # Common versions - GO_VERSION: '1.22.2' + GO_VERSION: '1.22.3' # Common users. 
We can't run a step 'if secrets.AWS_USR != ""' but we can run # a step 'if env.AWS_USR' != ""', so we copy these to succinctly test whether diff --git a/go.mod b/go.mod index 11bd26148..2e6fb6800 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/crossplane/crossplane go 1.21 -toolchain go1.22.2 +toolchain go1.22.3 require ( dario.cat/mergo v1.0.0 From 8628d5b1e916502c339702b18c7dec77e6cc0d93 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 11:17:20 +0000 Subject: [PATCH 179/370] chore(deps): update renovatebot/github-action action to v40.1.11 --- .github/workflows/renovate.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 013f0ca42..baf6fbeb9 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -38,7 +38,7 @@ jobs: private-key: ${{ secrets.RENOVATE_GITHUB_APP_PRIVATE_KEY }} - name: Self-hosted Renovate - uses: renovatebot/github-action@2e021d24483d81e77e0e902d0809adfbfff276fc # v40.1.10 + uses: renovatebot/github-action@063e0c946b9c1af35ef3450efc44114925d6e8e6 # v40.1.11 env: RENOVATE_REPOSITORIES: ${{ github.repository }} # Use GitHub API to create commits From a796de80775028894661ed21a5dd4a890c1b5c6f Mon Sep 17 00:00:00 2001 From: Tom Stian Berget Date: Mon, 13 May 2024 14:19:31 +0200 Subject: [PATCH 180/370] Update ADOPTERS.md Signed-off-by: Tom Stian Berget --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 857a5ecb5..8818cdd73 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -87,3 +87,4 @@ This list is sorted in the order that organizations were added to it. | [Veset](https://veset.tv/) | [@pblgomez](https://github.com/pblgomez) | At Veset we are deploying all our backend resources in production environments to be managed by Crossplane. | | [Hyland Software](https://www.hyland.com/) | [@sethfduke](mailto:seth.duke@hyland.com) | Hyland is utilizing Crossplane to simplify the infrastructure provisioning process for internal development teams, providing a simple means of creating resources, while maintaining control over compliance, security, and best-practices of these resources through a suite of Compositions. | | [Skillsoft](https://www.skillsoft.com/) | [@brandon-powers](https://github.com/brandon-powers) | At Skillsoft, Crossplane automates the provisioning and management of our AWS infrastructure (S3, Athena, and Glue) to support core Apache Kafka services powering our online learning platform, [Percipio](https://www.skillsoft.com/meet-skillsoft-percipio), in production environments. | +| [Sopra Steria NO](https://www.soprasteria.no/) | [Eirik Holgernes](mailto:eirik.holgernes@soprasteria.com) | As a consultant agency, [Sopra Steria NO](https://www.soprasteria.no/) is leveraging the benefits of [Crossplane](https://www.crossplane.io/) to create self-service backends to increase speed and agility for the developers and engineers of our customers.
With the power of the compositions and composite resource definitions, the life cycle management of resources in [Kubernetes](https://kubernetes.io/) and deployment using GitOps tools like [Flux](https://fluxcd.io/) or [Argo CD](https://argoproj.github.io/cd/), our customers are taking giant strides into the future! | From dba1edeab7b98b6fbff64e58b0cb3ccfe40bc28c Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 14 May 2024 16:06:51 -0700 Subject: [PATCH 181/370] Don't check for buf breaking changes on release branches The action we use to do this doesn't yet understand branches. Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 34ba68201..5f051335a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -482,24 +482,18 @@ jobs: with: input: apis - - name: Detect Breaking Changes in Protocol Buffers (Master Branch) + # buf-breaking-action doesn't support branches + # https://github.com/bufbuild/buf-push-action/issues/34 + - name: Detect Breaking Changes in Protocol Buffers uses: bufbuild/buf-breaking-action@a074e988ee34efcd4927079e79c611f428354c01 # v1 - # We want to run this for the master branch, and PRs. - if: ${{ ! startsWith(github.ref, 'refs/heads/release-') }} + # We want to run this for the master branch, and PRs against master. + if: ${{ github.ref == 'refs/heads/master' || github.base_ref == 'master' }} with: input: apis against: "https://github.com/${GITHUB_REPOSITORY}.git#branch=master,subdir=apis" - - - name: Detect Breaking Changes in Protocol Buffers (Release Branch) - uses: bufbuild/buf-breaking-action@a074e988ee34efcd4927079e79c611f428354c01 # v1 - # We want to run this only on release branches. - if: ${{ startsWith(github.ref, 'refs/heads/release-') }} - with: - input: apis - against: "https://github.com/${GITHUB_REPOSITORY}.git#branch=${GITHUB_REF_NAME},subdir=apis" - name: Push Protocol Buffers to Buf Schema Registry - if: ${{ github.repository == 'crossplane/crossplane' && (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release-')) }} + if: ${{ github.repository == 'crossplane/crossplane' && github.ref == 'refs/heads/master' }} uses: bufbuild/buf-push-action@v1 with: input: apis From 9be884b975ee83906b915d00015421e4655705a7 Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Wed, 15 May 2024 09:50:44 -0700 Subject: [PATCH 182/370] build: add release-1.16 to Renovate baseBranches Signed-off-by: Jared Watts --- .github/renovate.json5 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 5ed453749..b8d934815 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -12,7 +12,7 @@ "prConcurrentLimit": 5, // The branches renovate should target // PLEASE UPDATE THIS WHEN RELEASING. - "baseBranches": ["master","release-1.13","release-1.14","release-1.15"], + "baseBranches": ["master","release-1.13","release-1.14","release-1.15", "release-1.16"], "ignorePaths": [ "design/**", // We test upgrades, so leave it on an older version on purpose. 
From ac5edfddd8a99abce41ea880884931bfff6f0cda Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 14 May 2024 20:21:25 -0700 Subject: [PATCH 183/370] Use stripped out crossplane/build submodule Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 24 ++++++++++++------------ .gitmodules | 2 +- Makefile | 6 +----- build | 2 +- 4 files changed, 15 insertions(+), 19 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 34ba68201..c1a451e9f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,8 +54,8 @@ jobs: key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-pkg- - - name: Vendor Dependencies - run: make vendor vendor.check + - name: Download Go Modules + run: make modules.download modules.check - name: Check Diff run: make check-diff @@ -108,8 +108,8 @@ jobs: key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-pkg- - - name: Vendor Dependencies - run: make vendor vendor.check + - name: Download Go Modules + run: make modules.download modules.check # We could run 'make lint' to ensure our desired Go version, but we prefer # this action because it leaves 'annotations' (i.e. it comments on PRs to @@ -154,8 +154,8 @@ jobs: key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-pkg- - - name: Vendor Dependencies - run: make vendor vendor.check + - name: Download Go Modules + run: make modules.download modules.check - name: Initialize CodeQL uses: github/codeql-action/init@c7f9125735019aa87cfc361530512d50ea439c71 # v3 @@ -222,8 +222,8 @@ jobs: key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-pkg- - - name: Vendor Dependencies - run: make vendor vendor.check + - name: Download Go Modules + run: make modules.download modules.check - name: Run Unit Tests run: make -j2 test @@ -292,8 +292,8 @@ jobs: restore-keys: | ${{ runner.os }}-pkg- - - name: Vendor Dependencies - run: make vendor vendor.check + - name: Download Go Modules + run: make modules.download modules.check - name: Build Helm Chart run: make -j2 build @@ -382,8 +382,8 @@ jobs: key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-pkg- - - name: Vendor Dependencies - run: make vendor vendor.check + - name: Download Go Modules + run: make modules.download modules.check - name: Build Artifacts run: make -j2 build.all diff --git a/.gitmodules b/.gitmodules index c2fad4707..8f84209c8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "build"] path = build - url = https://github.com/upbound/build + url = https://github.com/crossplane/build diff --git a/Makefile b/Makefile index fbc82633d..3a9c80127 100644 --- a/Makefile +++ b/Makefile @@ -42,8 +42,7 @@ GO_LINT_ARGS ?= "--fix" # ==================================================================================== # Setup Kubernetes tools -USE_HELM3 = true -HELM3_VERSION = v3.14.4 +HELM_VERSION = v3.14.4 KIND_VERSION = v0.21.0 -include build/makelib/k8s_tools.mk @@ -81,9 +80,6 @@ fallthrough: submodules @echo Initial setup complete. Running make again . . . @make -manifests: - @$(WARN) Deprecated. Please run make generate instead. 
- CRD_DIR = cluster/crds crds.clean: diff --git a/build b/build index 75a9fe3ae..231258db2 160000 --- a/build +++ b/build @@ -1 +1 @@ -Subproject commit 75a9fe3ae6b6de82c5f7ddc6a267617940f16b83 +Subproject commit 231258db281237379d8ec0c6e4af9d7c1ae5cc4a From e6c77a9f725ead41c77da8c752a7a8f3d1612393 Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Wed, 15 May 2024 18:01:35 -0700 Subject: [PATCH 184/370] Update releases table and renovate branches post 1.16 Signed-off-by: Jared Watts --- .github/renovate.json5 | 2 +- README.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index b8d934815..8440ced47 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -12,7 +12,7 @@ "prConcurrentLimit": 5, // The branches renovate should target // PLEASE UPDATE THIS WHEN RELEASING. - "baseBranches": ["master","release-1.13","release-1.14","release-1.15", "release-1.16"], + "baseBranches": ["master","release-1.14","release-1.15", "release-1.16"], "ignorePaths": [ "design/**", // We test upgrades, so leave it on an older version on purpose. diff --git a/README.md b/README.md index 723f28ebe..24f7abb44 100644 --- a/README.md +++ b/README.md @@ -24,12 +24,12 @@ documentation]. | Release | Release Date | EOL | |:-------:|:-------------:|:--------:| -| v1.13 | Jul 27, 2023 | May 2024 | | v1.14 | Nov 1, 2023 | Aug 2024 | | v1.15 | Feb 15, 2024 | Nov 2024 | -| v1.16 | Early May '24 | Feb 2025 | +| v1.16 | May 15, 2024 | Feb 2025 | | v1.17 | Early Aug '24 | May 2025 | | v1.18 | Early Nov '24 | Aug 2025 | +| v1.19 | Early Feb '25 | Nov 2025 | You can subscribe to the [community calendar] to track all release dates, and find the most recent releases on the [releases] page. From ae3b72fdd52476b54c6e8fbb09302b4986e0bb58 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 16 May 2024 08:08:46 +0000 Subject: [PATCH 185/370] chore(deps): update actions/upload-artifact digest to 6546280 --- .github/workflows/ci.yml | 6 +++--- .github/workflows/scan.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5894f56d8..4c6141246 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -306,7 +306,7 @@ jobs: run: make e2e E2E_TEST_FLAGS="-test.v -test.failfast -fail-fast --kind-logs-location ./logs-kind --test-suite ${{ matrix.test-suite }}" - name: Upload artifacts - uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 if: failure() with: name: e2e-kind-logs-${{ matrix.test-suite }} @@ -393,7 +393,7 @@ jobs: BUILD_ARGS: "--load" - name: Publish Artifacts to GitHub - uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 with: name: output path: _output/** @@ -457,7 +457,7 @@ jobs: language: go - name: Upload Crash - uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 if: failure() && steps.build.outcome == 'success' with: name: artifacts diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index 790ba857c..c5c4e1cd3 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -117,7 +117,7 @@ jobs: output: 'trivy-results.sarif' - name: Upload 
Artifact - uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 with: name: trivy-${{ env.escaped_filename }}.sarif path: trivy-results.sarif From 85d6948d4990401464f75079892b347bc9ab13b8 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 16 May 2024 16:01:06 +0000 Subject: [PATCH 186/370] chore(deps): update codecov/codecov-action digest to 6d79887 --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5894f56d8..81ac0cb32 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -229,7 +229,7 @@ jobs: run: make -j2 test - name: Publish Unit Test Coverage - uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be # v4 + uses: codecov/codecov-action@6d798873df2b1b8e5846dba6fb86631229fbcb17 # v4 with: flags: unittests file: _output/tests/linux_amd64/coverage.txt From 8a1a8ed3e4537d784894c9ec6c13e972cb633b3b Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 16 May 2024 16:38:23 +0000 Subject: [PATCH 187/370] chore(deps): update gcr.io/distroless/static docker digest to 4197211 --- cluster/images/crossplane/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/images/crossplane/Dockerfile b/cluster/images/crossplane/Dockerfile index 9ae58992e..1d4ad3eae 100644 --- a/cluster/images/crossplane/Dockerfile +++ b/cluster/images/crossplane/Dockerfile @@ -1,4 +1,4 @@ -FROM gcr.io/distroless/static@sha256:6d31326376a7834b106f281b04f67b5d015c31732f594930f2ea81365f99d60c +FROM gcr.io/distroless/static@sha256:41972110a1c1a5c0b6adb283e8aa092c43c31f7c5d79b8656fbffff2c3e61f05 ARG TARGETOS ARG TARGETARCH From 7fd997187730aaeb1a8218329b5133f1fcc925bf Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 16 May 2024 16:38:27 +0000 Subject: [PATCH 188/370] chore(deps): update github/codeql-action digest to b7cec75 --- .github/workflows/ci.yml | 4 ++-- .github/workflows/scan.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4c6141246..1d98d831b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -158,12 +158,12 @@ jobs: run: make modules.download modules.check - name: Initialize CodeQL - uses: github/codeql-action/init@c7f9125735019aa87cfc361530512d50ea439c71 # v3 + uses: github/codeql-action/init@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3 with: languages: go - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@c7f9125735019aa87cfc361530512d50ea439c71 # v3 + uses: github/codeql-action/analyze@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3 trivy-scan-fs: runs-on: ubuntu-22.04 diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index c5c4e1cd3..ef5707ae6 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -124,7 +124,7 @@ jobs: retention-days: 3 - name: Upload Trivy Scan Results To GitHub Security Tab - uses: github/codeql-action/upload-sarif@c7f9125735019aa87cfc361530512d50ea439c71 # v3 + uses: github/codeql-action/upload-sarif@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3 with: sarif_file: 'trivy-results.sarif' category: ${{ matrix.image }}:${{ env.tag }} From 
429f8ff2f8a7d74246817f7a9fa926b483575b2d Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 16 May 2024 16:38:31 +0000 Subject: [PATCH 189/370] chore(deps): update golangci/golangci-lint-action digest to d6238b0 --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4c6141246..3e87e8cd3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -115,7 +115,7 @@ jobs: # this action because it leaves 'annotations' (i.e. it comments on PRs to # point out linter violations). - name: Lint - uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4 + uses: golangci/golangci-lint-action@d6238b002a20823d52840fda27e2d4891c5952dc # v4 with: version: ${{ env.GOLANGCI_VERSION }} skip-cache: true # We do our own caching. From 3c255629452ba2f1e06bd3c63fed35aa468d811b Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 16 May 2024 16:38:34 +0000 Subject: [PATCH 190/370] chore(deps): update actions/checkout action to v4.1.5 --- .github/workflows/renovate.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index baf6fbeb9..7ca5f10bd 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -22,7 +22,7 @@ jobs: !github.event.pull_request.head.repo.fork steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 with: submodules: true From fbb9f751da85fcf8313473814edf606cbb5742e8 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 16 May 2024 17:41:25 -0700 Subject: [PATCH 191/370] Add the pkg directory to GO_SUBDIRS There's failing unit tests in there, but we never test them because make test ignores them. 
Signed-off-by: Nic Cope --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3a9c80127..dafd154e6 100644 --- a/Makefile +++ b/Makefile @@ -32,7 +32,7 @@ GO_TEST_PARALLEL := $(shell echo $$(( $(NPROCS) / 2 ))) GO_STATIC_PACKAGES = $(GO_PROJECT)/cmd/crossplane $(GO_PROJECT)/cmd/crank GO_TEST_PACKAGES = $(GO_PROJECT)/test/e2e GO_LDFLAGS += -X $(GO_PROJECT)/internal/version.version=$(VERSION) -GO_SUBDIRS += cmd internal apis +GO_SUBDIRS += cmd internal apis pkg GO111MODULE = on GOLANGCILINT_VERSION = 1.57.2 GO_LINT_ARGS ?= "--fix" From dade4645790e08fa06ffe0196fb4233ce62f6c95 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Fri, 17 May 2024 08:09:24 +0000 Subject: [PATCH 192/370] chore(deps): update actions/checkout action to v4.1.6 --- .github/workflows/renovate.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 7ca5f10bd..c19c77628 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -22,7 +22,7 @@ jobs: !github.event.pull_request.head.repo.fork steps: - name: Checkout - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 with: submodules: true From e14749af06088ac747f2c5c6195c3bc07fde5b6a Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Fri, 17 May 2024 18:33:50 -0700 Subject: [PATCH 193/370] adopters: Clarify production usage for Hyland Software Signed-off-by: Jared Watts --- ADOPTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index 8818cdd73..f492479b6 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -85,6 +85,6 @@ This list is sorted in the order that organizations were added to it. | [UiPath](https://www.uipath.com/) | [@mjnovice](https://github.com/mjnovice) | Control plane for infrastructure management which powers [AutomationSuite](https://docs.uipath.com/automation-suite/automation-suite/2023.10/installation-guide-eks-aks/automation-suite-on-eksaks-overview) | | [SpareBank 1 Utvikling](https://sparebank1.dev/) | [@chlunde](https://github.com/chlunde) | Crossplane powers our Internal Developer Platform. It is utilized for day-to-day operations via GitOps and enabled us to execute a large-scale self-service migration of over a thousand production microservices, databases and caches from on-premises to EKS and managed AWS services. | | [Veset](https://veset.tv/) | [@pblgomez](https://github.com/pblgomez) | At Veset we are deploying all our backend resources in production environments to be managed by Crossplane. | -| [Hyland Software](https://www.hyland.com/) | [@sethfduke](mailto:seth.duke@hyland.com) | Hyland is utilizing Crossplane to simplify the infrastructure provisioning process for internal development teams, providing a simple means of creating resources, while maintaining control over compliance, security, and best-practices of these resources through a suite of Compositions. | +| [Hyland Software](https://www.hyland.com/) | [@sethfduke](mailto:seth.duke@hyland.com) | Hyland is utilizing Crossplane in production environments to simplify the infrastructure provisioning process for internal development teams, providing a simple means of creating resources, while maintaining control over compliance, security, and best-practices of these resources through a suite of Compositions. 
| | [Skillsoft](https://www.skillsoft.com/) | [@brandon-powers](https://github.com/brandon-powers) | At Skillsoft, Crossplane automates the provisioning and management of our AWS infrastructure (S3, Athena, and Glue) to support core Apache Kafka services powering our online learning platform, [Percipio](https://www.skillsoft.com/meet-skillsoft-percipio), in production environments. | | [Sopra Steria NO](https://www.soprasteria.no/) | [Eirik Holgernes](mailto:eirik.holgernes@soprasteria.com) | As a consultant agency, [Sopra Steria NO](https://www.soprasteria.no/) is leveraging the benefits of [Crossplane](https://www.crossplane.io/) to create self-service backends to increase speed and agility for the developers and engineers of our customers.
With the power of the compositions and composite resource definitions, the life cycle management of resources in [Kubernetes](https://kubernetes.io/) and deployment using GitOps tools like [Flux](https://fluxcd.io/) or [Argo CD](https://argoproj.github.io/cd/), our customers are taking giant strides into the future! | From 94dac8e2e7d3298f824b67ce7a450d2ce3eb5b7a Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Fri, 17 May 2024 19:36:19 -0700 Subject: [PATCH 194/370] test: Fix TestComposedTemplateGetBaseObject to check for correct type This test started to fail once a DeepCopy method was added to the composed.Unstructured type to fix https://github.com/crossplane/crossplane/issues/4970. Now that the full type is returned via that DeepCopy method, this unit test needs to check for this full type instead of the type it embeds. Signed-off-by: Jared Watts --- .../apiextensions/v1/composition/patches_test.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/pkg/validation/apiextensions/v1/composition/patches_test.go b/pkg/validation/apiextensions/v1/composition/patches_test.go index d6d9fbe89..be0cbbb03 100644 --- a/pkg/validation/apiextensions/v1/composition/patches_test.go +++ b/pkg/validation/apiextensions/v1/composition/patches_test.go @@ -33,6 +33,7 @@ import ( xperrors "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/fieldpath" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" @@ -963,12 +964,14 @@ func TestComposedTemplateGetBaseObject(t *testing.T) { }, }, want: want{ - output: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Service", - "metadata": map[string]interface{}{ - "name": "foo", + output: &composed.Unstructured{ + Unstructured: unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Service", + "metadata": map[string]interface{}{ + "name": "foo", + }, }, }, }, From 724b62697b1113d9e44ce0d228359d5f87cfb14e Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sat, 18 May 2024 08:09:40 +0000 Subject: [PATCH 195/370] fix(deps): update module github.com/emicklei/dot to v1.6.2 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2e6fb6800..228ae8752 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/crossplane/crossplane-runtime v1.17.0-rc.0 github.com/docker/docker v25.0.5+incompatible github.com/docker/go-connections v0.5.0 - github.com/emicklei/dot v1.6.1 + github.com/emicklei/dot v1.6.2 github.com/go-git/go-billy/v5 v5.5.0 github.com/go-git/go-git/v5 v5.11.0 github.com/golang-jwt/jwt/v5 v5.2.0 diff --git a/go.sum b/go.sum index 2fba40415..258458178 100644 --- a/go.sum +++ b/go.sum @@ -161,8 +161,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/emicklei/dot v1.6.1 h1:ujpDlBkkwgWUY+qPId5IwapRW/xEoligRSYjioR6DFI= -github.com/emicklei/dot v1.6.1/go.mod 
h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= From cac03a71888795c4bc632b68a6f189d4da7a68f0 Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Sat, 18 May 2024 19:12:07 -0700 Subject: [PATCH 196/370] test: Fix TestValidateReadinessCheck to use correct field types in schema Signed-off-by: Jared Watts --- .../v1/composition/readinessChecks_test.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go b/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go index 07496e5d4..4b555a7f4 100644 --- a/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go +++ b/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go @@ -98,7 +98,12 @@ func TestValidateReadinessCheck(t *testing.T) { FieldPath: "spec.someOtherField", }, )), - gkToCRD: defaultGKToCRDs(), + gkToCRD: buildGkToCRDs( + defaultManagedCrdBuilder().withOption(func(crd *extv1.CustomResourceDefinition) { + crd.Spec.Versions[0].Schema.OpenAPIV3Schema.Properties["spec"].Properties["someOtherField"] = extv1.JSONSchemaProps{ + Type: "boolean", + } + }).build()), }, want: want{ errs: nil, @@ -114,7 +119,12 @@ func TestValidateReadinessCheck(t *testing.T) { FieldPath: "spec.someOtherField", }, )), - gkToCRD: defaultGKToCRDs(), + gkToCRD: buildGkToCRDs( + defaultManagedCrdBuilder().withOption(func(crd *extv1.CustomResourceDefinition) { + crd.Spec.Versions[0].Schema.OpenAPIV3Schema.Properties["spec"].Properties["someOtherField"] = extv1.JSONSchemaProps{ + Type: "boolean", + } + }).build()), }, want: want{ errs: nil, @@ -290,7 +300,7 @@ func TestValidateReadinessCheck(t *testing.T) { t.Fatalf("NewValidator() error = %v", err) } got := v.validateReadinessChecksWithSchemas(context.TODO(), tt.args.comp) - if diff := cmp.Diff(got, tt.want.errs, sortFieldErrors(), cmpopts.IgnoreFields(field.Error{}, "Detail")); diff != "" { + if diff := cmp.Diff(tt.want.errs, got, sortFieldErrors(), cmpopts.IgnoreFields(field.Error{}, "Detail")); diff != "" { t.Errorf("validateReadinessChecksWithSchemas(...) = -want, +got\n%s\n", diff) } }) From 28560db577540e683d5969397fda75eecafcc488 Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Sat, 18 May 2024 19:12:50 -0700 Subject: [PATCH 197/370] test: Fix TestFromKnownJSONType to be current for Array and Object types Previously, these types were unsupported and were therefore considered invalid. Support for these types was added in the below commits, but the TestFromKnownJSONType test was never updated to match. 
* https://github.com/crossplane/crossplane/commit/66e02e4cc3e972b64e9a182b0c4a9deb5817237c * https://github.com/crossplane/crossplane/commit/03398921c2d353f315c7088a1dd98b33a4652aa0 Signed-off-by: Jared Watts --- pkg/validation/internal/schema/schema_test.go | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/pkg/validation/internal/schema/schema_test.go b/pkg/validation/internal/schema/schema_test.go index 76c9e4bad..2c88b7544 100644 --- a/pkg/validation/internal/schema/schema_test.go +++ b/pkg/validation/internal/schema/schema_test.go @@ -223,19 +223,15 @@ func TestFromKnownJSONType(t *testing.T) { out: v1.TransformIOTypeBool, }, }, - "InvalidArray": { - reason: "Array should not be valid", + "ValidArray": { + reason: "Array should be valid and convert properly", args: args{t: KnownJSONTypeArray}, - want: want{ - err: xperrors.Errorf(errFmtUnsupportedJSONType, KnownJSONTypeArray), - }, + want: want{out: v1.TransformIOTypeObject}, }, - "InvalidObject": { - reason: "Object should not be valid", + "ValidObject": { + reason: "Object should be valid and convert properly", args: args{t: KnownJSONTypeObject}, - want: want{ - err: xperrors.Errorf(errFmtUnsupportedJSONType, KnownJSONTypeObject), - }, + want: want{out: v1.TransformIOTypeObject}, }, } for name, tc := range cases { From 4397969c53627234c9203703ffe8993972584767 Mon Sep 17 00:00:00 2001 From: dalton hill Date: Mon, 20 May 2024 11:55:09 -0500 Subject: [PATCH 198/370] updates claim and xr crd reconciler filters to watch the correct objects Signed-off-by: dalton hill --- .../apiextensions/definition/reconciler.go | 1 + .../apiextensions/definition/watch.go | 32 +++++++++++++++++++ .../controller/apiextensions/offered/watch.go | 20 +++++++++--- 3 files changed, 49 insertions(+), 4 deletions(-) create mode 100644 internal/controller/apiextensions/definition/watch.go diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 5f469c4fd..0e661bc20 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -146,6 +146,7 @@ func Setup(mgr ctrl.Manager, o apiextensionscontroller.Options) error { Named(name). For(&v1.CompositeResourceDefinition{}). Owns(&extv1.CustomResourceDefinition{}). + WithEventFilter(resource.NewPredicates(OffersCompositeResource())). WithOptions(o.ForControllerRuntime()). Complete(ratelimiter.NewReconciler(name, errors.WithSilentRequeueOnConflict(r), o.GlobalRateLimiter)) } diff --git a/internal/controller/apiextensions/definition/watch.go b/internal/controller/apiextensions/definition/watch.go new file mode 100644 index 000000000..e1cd16b08 --- /dev/null +++ b/internal/controller/apiextensions/definition/watch.go @@ -0,0 +1,32 @@ +package definition + +import ( + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/crossplane/crossplane-runtime/pkg/resource" + + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + "github.com/crossplane/crossplane/internal/xcrd" +) + +// OffersCompositeResource accepts any CompositeResourceDefinition or a +// CustomResourceDefinition that represents a composite. 
+func OffersCompositeResource() resource.PredicateFn { + return func(obj runtime.Object) bool { + if _, ok := obj.(*v1.CompositeResourceDefinition); ok { + return true + } + + crd, ok := obj.(*extv1.CustomResourceDefinition) + if !ok { + return false + } + for _, c := range crd.Spec.Names.Categories { + if c == xcrd.CategoryComposite { + return true + } + } + return false + } +} diff --git a/internal/controller/apiextensions/offered/watch.go b/internal/controller/apiextensions/offered/watch.go index 462239037..3470767c5 100644 --- a/internal/controller/apiextensions/offered/watch.go +++ b/internal/controller/apiextensions/offered/watch.go @@ -19,6 +19,7 @@ package offered import ( "context" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -30,17 +31,28 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + "github.com/crossplane/crossplane/internal/xcrd" ) -// OffersClaim accepts objects that are a CompositeResourceDefinition and offer -// a composite resource claim. +// OffersClaim accepts a CompositeResourceDefinition that has a claim or a +// CustomResourceDefinition that represents a claim. func OffersClaim() resource.PredicateFn { return func(obj runtime.Object) bool { - d, ok := obj.(*v1.CompositeResourceDefinition) + xrd, ok := obj.(*v1.CompositeResourceDefinition) + if ok { + return xrd.OffersClaim() + } + + crd, ok := obj.(*extv1.CustomResourceDefinition) if !ok { return false } - return d.OffersClaim() + for _, c := range crd.Spec.Names.Categories { + if c == xcrd.CategoryClaim { + return true + } + } + return false } } From dad639aa58cee395f3c9c54f6488cc8580e4ea5c Mon Sep 17 00:00:00 2001 From: dalton hill Date: Mon, 20 May 2024 13:51:21 -0500 Subject: [PATCH 199/370] adding and updating unit tests Signed-off-by: dalton hill --- .../apiextensions/definition/watch_test.go | 85 +++++++++++++++++++ .../apiextensions/offered/watch_test.go | 36 +++++++- 2 files changed, 120 insertions(+), 1 deletion(-) create mode 100644 internal/controller/apiextensions/definition/watch_test.go diff --git a/internal/controller/apiextensions/definition/watch_test.go b/internal/controller/apiextensions/definition/watch_test.go new file mode 100644 index 000000000..8d7ab91f7 --- /dev/null +++ b/internal/controller/apiextensions/definition/watch_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package definition + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" +) + +func TestOffersCompositeResource(t *testing.T) { + cases := map[string]struct { + obj runtime.Object + want bool + }{ + "NotAnXRD": { + want: false, + }, + "XRD": { + obj: &v1.CompositeResourceDefinition{}, + want: true, + }, + "ClaimCRD": { + obj: &extv1.CustomResourceDefinition{ + Spec: extv1.CustomResourceDefinitionSpec{ + Names: extv1.CustomResourceDefinitionNames{ + Categories: []string{ + "claim", + }, + }, + }, + }, + want: false, + }, + "CompositeCRD": { + obj: &extv1.CustomResourceDefinition{ + Spec: extv1.CustomResourceDefinitionSpec{ + Names: extv1.CustomResourceDefinitionNames{ + Categories: []string{ + "composite", + }, + }, + }, + }, + want: true, + }, + "OtherCRD": { + obj: &extv1.CustomResourceDefinition{ + Spec: extv1.CustomResourceDefinitionSpec{ + Names: extv1.CustomResourceDefinitionNames{ + Categories: []string{}, + }, + }, + }, + want: false, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := OffersCompositeResource()(tc.obj) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("\n%s\nOffersCompositeResource(...): -want, +got:\n%s", name, diff) + } + }) + } +} diff --git a/internal/controller/apiextensions/offered/watch_test.go b/internal/controller/apiextensions/offered/watch_test.go index 2ee2fe2a8..20bb41cc5 100644 --- a/internal/controller/apiextensions/offered/watch_test.go +++ b/internal/controller/apiextensions/offered/watch_test.go @@ -55,13 +55,47 @@ func TestOffersClaim(t *testing.T) { }, want: true, }, + "ClaimCRD": { + obj: &extv1.CustomResourceDefinition{ + Spec: extv1.CustomResourceDefinitionSpec{ + Names: extv1.CustomResourceDefinitionNames{ + Categories: []string{ + "claim", + }, + }, + }, + }, + want: true, + }, + "CompositeCRD": { + obj: &extv1.CustomResourceDefinition{ + Spec: extv1.CustomResourceDefinitionSpec{ + Names: extv1.CustomResourceDefinitionNames{ + Categories: []string{ + "composite", + }, + }, + }, + }, + want: false, + }, + "OtherCRD": { + obj: &extv1.CustomResourceDefinition{ + Spec: extv1.CustomResourceDefinitionSpec{ + Names: extv1.CustomResourceDefinitionNames{ + Categories: []string{}, + }, + }, + }, + want: false, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { got := OffersClaim()(tc.obj) if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("OffersClaim(...): -want, +got:\n%s", diff) + t.Errorf("\n%s\nOffersClaim(...): -want, +got:\n%s", name, diff) } }) } From d506d1daf5b6eb20def3a7ada425f0f4b4744bef Mon Sep 17 00:00:00 2001 From: dalton hill Date: Mon, 20 May 2024 16:03:34 -0500 Subject: [PATCH 200/370] Splitting generic predicate functions to improve readability and performance. 
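In sketch form (a minimal illustration of the new wiring only, not the full Setup functions; controller names and options are omitted): a controller-wide WithEventFilter applies one predicate to every watch, so the previous predicate had to accept XRDs and CRDs alike, while builder.WithPredicates attaches a predicate to just the watch that needs it.

    import (
        extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/builder"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"

        "github.com/crossplane/crossplane-runtime/pkg/resource"

        v1 "github.com/crossplane/crossplane/apis/apiextensions/v1"
    )

    // Sketch of the offered controller wiring: the XRD watch is filtered by
    // OffersClaim and the owned CRD watch by IsClaimCRD, instead of one
    // shared event filter that had to understand both kinds of object.
    func buildOfferedController(mgr ctrl.Manager, r reconcile.Reconciler) error {
        return ctrl.NewControllerManagedBy(mgr).
            For(&v1.CompositeResourceDefinition{}, builder.WithPredicates(resource.NewPredicates(OffersClaim()))).
            Owns(&extv1.CustomResourceDefinition{}, builder.WithPredicates(resource.NewPredicates(IsClaimCRD()))).
            Complete(r)
    }

With per-watch predicates, CRD events are no longer run through the claim-offering check meant for XRDs, and XRD events skip the CRD category check, which keeps each predicate small and avoids redundant type assertions per event.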
Signed-off-by: dalton hill --- .../apiextensions/definition/reconciler.go | 4 +- .../apiextensions/definition/watch.go | 11 ++--- .../apiextensions/definition/watch_test.go | 10 ++--- .../apiextensions/offered/reconciler.go | 6 +-- .../controller/apiextensions/offered/watch.go | 19 +++++---- .../apiextensions/offered/watch_test.go | 40 ++++++++++++++----- 6 files changed, 56 insertions(+), 34 deletions(-) diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 0e661bc20..2fa6f0470 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" @@ -145,8 +146,7 @@ func Setup(mgr ctrl.Manager, o apiextensionscontroller.Options) error { return ctrl.NewControllerManagedBy(mgr). Named(name). For(&v1.CompositeResourceDefinition{}). - Owns(&extv1.CustomResourceDefinition{}). - WithEventFilter(resource.NewPredicates(OffersCompositeResource())). + Owns(&extv1.CustomResourceDefinition{}, builder.WithPredicates(resource.NewPredicates(IsCompositeResourceCRD()))). WithOptions(o.ForControllerRuntime()). Complete(ratelimiter.NewReconciler(name, errors.WithSilentRequeueOnConflict(r), o.GlobalRateLimiter)) } diff --git a/internal/controller/apiextensions/definition/watch.go b/internal/controller/apiextensions/definition/watch.go index e1cd16b08..d515d359c 100644 --- a/internal/controller/apiextensions/definition/watch.go +++ b/internal/controller/apiextensions/definition/watch.go @@ -6,18 +6,13 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" - v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/internal/xcrd" ) -// OffersCompositeResource accepts any CompositeResourceDefinition or a -// CustomResourceDefinition that represents a composite. -func OffersCompositeResource() resource.PredicateFn { +// IsCompositeResourceCRD accepts any CustomResourceDefinition that represents a +// Composite Resource. 
+func IsCompositeResourceCRD() resource.PredicateFn { return func(obj runtime.Object) bool { - if _, ok := obj.(*v1.CompositeResourceDefinition); ok { - return true - } - crd, ok := obj.(*extv1.CustomResourceDefinition) if !ok { return false diff --git a/internal/controller/apiextensions/definition/watch_test.go b/internal/controller/apiextensions/definition/watch_test.go index 8d7ab91f7..cffcaf048 100644 --- a/internal/controller/apiextensions/definition/watch_test.go +++ b/internal/controller/apiextensions/definition/watch_test.go @@ -26,17 +26,17 @@ import ( v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" ) -func TestOffersCompositeResource(t *testing.T) { +func TestIsCompositeResourceCRD(t *testing.T) { cases := map[string]struct { obj runtime.Object want bool }{ - "NotAnXRD": { + "NotCRD": { want: false, }, "XRD": { obj: &v1.CompositeResourceDefinition{}, - want: true, + want: false, }, "ClaimCRD": { obj: &extv1.CustomResourceDefinition{ @@ -76,9 +76,9 @@ func TestOffersCompositeResource(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - got := OffersCompositeResource()(tc.obj) + got := IsCompositeResourceCRD()(tc.obj) if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("\n%s\nOffersCompositeResource(...): -want, +got:\n%s", name, diff) + t.Errorf("\n%s\nIsCompositeResourceCRD(...): -want, +got:\n%s", name, diff) } }) } diff --git a/internal/controller/apiextensions/offered/reconciler.go b/internal/controller/apiextensions/offered/reconciler.go index 8f1eaf990..2111294d9 100644 --- a/internal/controller/apiextensions/offered/reconciler.go +++ b/internal/controller/apiextensions/offered/reconciler.go @@ -30,6 +30,7 @@ import ( kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -124,9 +125,8 @@ func Setup(mgr ctrl.Manager, o apiextensionscontroller.Options) error { return ctrl.NewControllerManagedBy(mgr). Named(name). - For(&v1.CompositeResourceDefinition{}). - Owns(&extv1.CustomResourceDefinition{}). - WithEventFilter(resource.NewPredicates(OffersClaim())). + For(&v1.CompositeResourceDefinition{}, builder.WithPredicates(resource.NewPredicates(OffersClaim()))). + Owns(&extv1.CustomResourceDefinition{}, builder.WithPredicates(resource.NewPredicates(IsClaimCRD()))). WithOptions(o.ForControllerRuntime()). Complete(ratelimiter.NewReconciler(name, errors.WithSilentRequeueOnConflict(r), o.GlobalRateLimiter)) } diff --git a/internal/controller/apiextensions/offered/watch.go b/internal/controller/apiextensions/offered/watch.go index 3470767c5..a2c83d914 100644 --- a/internal/controller/apiextensions/offered/watch.go +++ b/internal/controller/apiextensions/offered/watch.go @@ -34,20 +34,25 @@ import ( "github.com/crossplane/crossplane/internal/xcrd" ) -// OffersClaim accepts a CompositeResourceDefinition that has a claim or a -// CustomResourceDefinition that represents a claim. +// OffersClaim accepts any CompositeResourceDefinition that offers a claim. 
func OffersClaim() resource.PredicateFn { return func(obj runtime.Object) bool { - xrd, ok := obj.(*v1.CompositeResourceDefinition) - if ok { - return xrd.OffersClaim() + d, ok := obj.(*v1.CompositeResourceDefinition) + if !ok { + return false } + return d.OffersClaim() + } +} - crd, ok := obj.(*extv1.CustomResourceDefinition) +// IsClaimCRD accepts any CustomResourceDefinition that represents a Claim. +func IsClaimCRD() resource.PredicateFn { + return func(obj runtime.Object) bool { + d, ok := obj.(*extv1.CustomResourceDefinition) if !ok { return false } - for _, c := range crd.Spec.Names.Categories { + for _, c := range d.Spec.Names.Categories { if c == xcrd.CategoryClaim { return true } diff --git a/internal/controller/apiextensions/offered/watch_test.go b/internal/controller/apiextensions/offered/watch_test.go index 20bb41cc5..4c9f128ad 100644 --- a/internal/controller/apiextensions/offered/watch_test.go +++ b/internal/controller/apiextensions/offered/watch_test.go @@ -42,6 +42,10 @@ func TestOffersClaim(t *testing.T) { "NotAnXRD": { want: false, }, + "CRD": { + obj: &extv1.CustomResourceDefinition{}, + want: false, + }, "DoesNotOfferClaim": { obj: &v1.CompositeResourceDefinition{}, want: false, @@ -55,6 +59,30 @@ func TestOffersClaim(t *testing.T) { }, want: true, }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := OffersClaim()(tc.obj) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("\n%s\nOffersClaim(...): -want, +got:\n%s", name, diff) + } + }) + } +} + +func TestIsClaimCRD(t *testing.T) { + cases := map[string]struct { + obj runtime.Object + want bool + }{ + "NotCRD": { + want: false, + }, + "XRD": { + obj: &v1.CompositeResourceDefinition{}, + want: false, + }, "ClaimCRD": { obj: &extv1.CustomResourceDefinition{ Spec: extv1.CustomResourceDefinitionSpec{ @@ -80,22 +108,16 @@ func TestOffersClaim(t *testing.T) { want: false, }, "OtherCRD": { - obj: &extv1.CustomResourceDefinition{ - Spec: extv1.CustomResourceDefinitionSpec{ - Names: extv1.CustomResourceDefinitionNames{ - Categories: []string{}, - }, - }, - }, + obj: &extv1.CustomResourceDefinition{}, want: false, }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { - got := OffersClaim()(tc.obj) + got := IsClaimCRD()(tc.obj) if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("\n%s\nOffersClaim(...): -want, +got:\n%s", name, diff) + t.Errorf("\n%s\nIsClaimCRD(...): -want, +got:\n%s", name, diff) } }) } From e0bd283e497d1cfa5b5368a818b80d109c4cdc4b Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 6 May 2024 13:13:02 -0700 Subject: [PATCH 201/370] Move engine and GVK routed cache from c/cr This just moved the files, unedited, as of the below commit. https://github.com/crossplane/crossplane-runtime/commit/8641eb2ba384af17e60f4e3421f8a06a1930b2cc Signed-off-by: Nic Cope --- internal/controller/engine/cache.go | 305 ++++++++++++++++++++++ internal/controller/engine/engine.go | 289 ++++++++++++++++++++ internal/controller/engine/engine_test.go | 223 ++++++++++++++++ 3 files changed, 817 insertions(+) create mode 100644 internal/controller/engine/cache.go create mode 100644 internal/controller/engine/engine.go create mode 100644 internal/controller/engine/engine_test.go diff --git a/internal/controller/engine/cache.go b/internal/controller/engine/cache.go new file mode 100644 index 000000000..fa65bf54e --- /dev/null +++ b/internal/controller/engine/cache.go @@ -0,0 +1,305 @@ +/* +Copyright 2023 The Crossplane Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + "github.com/crossplane/crossplane-runtime/pkg/errors" +) + +// GVKRoutedCache is a cache that routes requests by GVK to other caches. +type GVKRoutedCache struct { + scheme *runtime.Scheme + + fallback cache.Cache + + lock sync.RWMutex + delegates map[schema.GroupVersionKind]cache.Cache +} + +// NewGVKRoutedCache returns a new routed cache. +func NewGVKRoutedCache(scheme *runtime.Scheme, fallback cache.Cache) *GVKRoutedCache { + return &GVKRoutedCache{ + scheme: scheme, + fallback: fallback, + delegates: make(map[schema.GroupVersionKind]cache.Cache), + } +} + +var _ cache.Cache = &GVKRoutedCache{} + +// AddDelegate adds a delegated cache for a given GVK. +func (c *GVKRoutedCache) AddDelegate(gvk schema.GroupVersionKind, delegate cache.Cache) { + c.lock.Lock() + defer c.lock.Unlock() + + c.delegates[gvk] = delegate +} + +// RemoveDelegate removes a delegated cache for a given GVK. +func (c *GVKRoutedCache) RemoveDelegate(gvk schema.GroupVersionKind) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.delegates, gvk) +} + +// Get retrieves an object for a given ObjectKey backed by a cache. +func (c *GVKRoutedCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return errors.Errorf("failed to get GVK for type %T: %w", obj, err) + } + + c.lock.RLock() + delegate, ok := c.delegates[gvk] + c.lock.RUnlock() + + if ok { + return delegate.Get(ctx, key, obj, opts...) + } + + return c.fallback.Get(ctx, key, obj, opts...) +} + +// List lists objects for a given ObjectList backed by a cache. +func (c *GVKRoutedCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + gvk, err := apiutil.GVKForObject(list, c.scheme) + if err != nil { + return errors.Errorf("failed to get GVK for type %T: %w", list, err) + } + + if !strings.HasSuffix(gvk.Kind, "List") { + // following controller-runtime here which does not support non + // List types. + return errors.Errorf("non-list type %T (kind %q) passed as output", list, gvk) + } + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + c.lock.RLock() + delegate, ok := c.delegates[gvk] + c.lock.RUnlock() + + if ok { + return delegate.List(ctx, list, opts...) + } + + return c.fallback.List(ctx, list, opts...) +} + +// GetInformer returns an informer for the given object. 
+func (c *GVKRoutedCache) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, errors.Errorf("failed to get GVK for type %T: %w", obj, err) + } + + c.lock.RLock() + delegate, ok := c.delegates[gvk] + c.lock.RUnlock() + + if ok { + return delegate.GetInformer(ctx, obj, opts...) + } + + return c.fallback.GetInformer(ctx, obj, opts...) +} + +// GetInformerForKind returns an informer for the given GVK. +func (c *GVKRoutedCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) { + c.lock.RLock() + delegate, ok := c.delegates[gvk] + c.lock.RUnlock() + + if ok { + return delegate.GetInformerForKind(ctx, gvk, opts...) + } + + return c.fallback.GetInformerForKind(ctx, gvk, opts...) +} + +// RemoveInformer removes an informer entry and stops it if it was running. +func (c *GVKRoutedCache) RemoveInformer(ctx context.Context, obj client.Object) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return errors.Errorf("failed to get GVK for type %T: %w", obj, err) + } + + c.lock.RLock() + delegate, ok := c.delegates[gvk] + c.lock.RUnlock() + + if ok { + return delegate.RemoveInformer(ctx, obj) + } + + return c.fallback.RemoveInformer(ctx, obj) +} + +// Start for a GVKRoutedCache is a no-op. Start must be called for each delegate. +func (c *GVKRoutedCache) Start(_ context.Context) error { + return nil +} + +// WaitForCacheSync for a GVKRoutedCache waits for all delegates and the +// fallback to sync, and returns false if any of them fails to sync. +func (c *GVKRoutedCache) WaitForCacheSync(ctx context.Context) bool { + c.lock.RLock() + syncedCh := make(chan bool, len(c.delegates)+1) + cas := make([]cache.Cache, 0, len(c.delegates)) + for _, ca := range c.delegates { + cas = append(cas, ca) + } + cas = append(cas, c.fallback) + c.lock.RUnlock() + + var wg sync.WaitGroup + ctx, cancelFn := context.WithCancel(ctx) + + for _, ca := range cas { + wg.Add(1) + go func(ca cache.Cache) { + defer wg.Done() + synced := ca.WaitForCacheSync(ctx) + if !synced { + // first unsynced cache breaks the whole wait + cancelFn() + } + syncedCh <- synced + }(ca) + } + + wg.Wait() + close(syncedCh) + cancelFn() + + // any not synced? + for synced := range syncedCh { + if !synced { + return false + } + } + + return c.fallback.WaitForCacheSync(ctx) +} + +// IndexField adds an index with the given field name on the given object type +// by using the given function to extract the value for that field. +func (c *GVKRoutedCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return errors.Errorf("failed to get GVK for type %T: %w", obj, err) + } + + c.lock.RLock() + delegate, ok := c.delegates[gvk] + c.lock.RUnlock() + + if ok { + return delegate.IndexField(ctx, obj, field, extractValue) + } + + return c.fallback.IndexField(ctx, obj, field, extractValue) +} + +// cachedRoutedClient wraps a client and routes read requests by GVK to a cache. 
+type cachedRoutedClient struct { + client.Client + + scheme *runtime.Scheme + cache *GVKRoutedCache +} + +func (c *cachedRoutedClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return errors.Errorf("failed to get GVK for type %T: %w", obj, err) + } + + c.cache.lock.RLock() + delegate, ok := c.cache.delegates[gvk] + c.cache.lock.RUnlock() + + if ok { + return delegate.Get(ctx, key, obj, opts...) + } + + return c.Client.Get(ctx, key, obj, opts...) +} + +func (c *cachedRoutedClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + gvk, err := apiutil.GVKForObject(list, c.scheme) + if err != nil { + return errors.Errorf("failed to get GVK for type %T: %w", list, err) + } + + if !strings.HasSuffix(gvk.Kind, "List") { + // following controller-runtime here which does not support non + // List types. + return errors.Errorf("non-list type %T (kind %q) passed as output", list, gvk) + } + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + c.cache.lock.RLock() + delegate, ok := c.cache.delegates[gvk] + c.cache.lock.RUnlock() + + if ok { + return delegate.List(ctx, list, opts...) + } + + return c.Client.List(ctx, list, opts...) +} + +// WithGVKRoutedCache returns a manager backed by a GVKRoutedCache. The client +// returned by the manager will route read requests to cached GVKs. +func WithGVKRoutedCache(c *GVKRoutedCache, mgr controllerruntime.Manager) controllerruntime.Manager { + return &routedManager{ + Manager: mgr, + client: &cachedRoutedClient{ + Client: mgr.GetClient(), + scheme: mgr.GetScheme(), + cache: c, + }, + cache: c, + } +} + +type routedManager struct { + controllerruntime.Manager + + client client.Client + cache cache.Cache +} + +func (m *routedManager) GetClient() client.Client { + return m.client +} + +func (m *routedManager) GetCache() cache.Cache { + return m.cache +} diff --git a/internal/controller/engine/engine.go b/internal/controller/engine/engine.go new file mode 100644 index 000000000..4552948dc --- /dev/null +++ b/internal/controller/engine/engine.go @@ -0,0 +1,289 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package controller provides utilties for working with controllers. +package controller + +import ( + "context" + "sync" + + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/crossplane/crossplane-runtime/pkg/errors" +) + +// Error strings. 
+const ( + errCreateCache = "cannot create new cache" + errCreateController = "cannot create new controller" + errCrashCache = "cache error" + errCrashController = "controller error" + errWatch = "cannot setup watch" +) + +// A NewCacheFn creates a new controller-runtime cache. +type NewCacheFn func(cfg *rest.Config, o cache.Options) (cache.Cache, error) + +// A NewControllerFn creates a new controller-runtime controller. +type NewControllerFn func(name string, m manager.Manager, o controller.Options) (controller.Controller, error) + +// The default new cache and new controller functions. +// +//nolint:gochecknoglobals // We treat these as constants. +var ( + DefaultNewCacheFn NewCacheFn = cache.New + DefaultNewControllerFn NewControllerFn = controller.NewUnmanaged +) + +// An Engine manages the lifecycles of controller-runtime controllers (and their +// caches). The lifecycles of the controllers are not coupled to lifecycle of +// the engine, nor to the lifecycle of the controller manager it uses. +type Engine struct { + mgr manager.Manager + + started map[string]context.CancelFunc + errors map[string]error + mx sync.RWMutex + + newCache NewCacheFn + newCtrl NewControllerFn +} + +// An EngineOption configures an Engine. +type EngineOption func(*Engine) + +// WithNewCacheFn may be used to configure a different cache implementation. +// DefaultNewCacheFn is used by default. +func WithNewCacheFn(fn NewCacheFn) EngineOption { + return func(e *Engine) { + e.newCache = fn + } +} + +// WithNewControllerFn may be used to configure a different controller +// implementation. DefaultNewControllerFn is used by default. +func WithNewControllerFn(fn NewControllerFn) EngineOption { + return func(e *Engine) { + e.newCtrl = fn + } +} + +// NewEngine produces a new Engine. +func NewEngine(mgr manager.Manager, o ...EngineOption) *Engine { + e := &Engine{ + mgr: mgr, + + started: make(map[string]context.CancelFunc), + errors: make(map[string]error), + + newCache: DefaultNewCacheFn, + newCtrl: DefaultNewControllerFn, + } + + for _, eo := range o { + eo(e) + } + + return e +} + +// IsRunning indicates whether the named controller is running - i.e. whether it +// has been started and does not appear to have crashed. +func (e *Engine) IsRunning(name string) bool { + e.mx.RLock() + defer e.mx.RUnlock() + + _, running := e.started[name] + return running +} + +// Err returns any error encountered by the named controller. The returned error +// is always nil if the named controller is running. +func (e *Engine) Err(name string) error { + e.mx.RLock() + defer e.mx.RUnlock() + + return e.errors[name] +} + +// Stop the named controller. +func (e *Engine) Stop(name string) { + e.done(name, nil) +} + +func (e *Engine) done(name string, err error) { + e.mx.Lock() + defer e.mx.Unlock() + + stop, ok := e.started[name] + if ok { + stop() + delete(e.started, name) + } + + // Don't overwrite the first error if done is called multiple times. + if e.errors[name] != nil { + return + } + e.errors[name] = err +} + +// Watch an object. +type Watch struct { + // one of the two: + kind client.Object + customSource source.Source + + handler handler.EventHandler + predicates []predicate.Predicate +} + +// For returns a Watch for the supplied kind of object. Events will be handled +// by the supplied EventHandler, and may be filtered by the supplied predicates. 
+func For(kind client.Object, h handler.EventHandler, p ...predicate.Predicate) Watch {
+	return Watch{kind: kind, handler: h, predicates: p}
+}
+
+// TriggeredBy returns a custom watch for secondary resources triggering the
+// controller. source.Kind can be used to create a source for a secondary cache.
+// Events will be handled by the supplied EventHandler, and may be filtered by
+// the supplied predicates.
+func TriggeredBy(source source.Source, h handler.EventHandler, p ...predicate.Predicate) Watch {
+	return Watch{customSource: source, handler: h, predicates: p}
+}
+
+// Start the named controller. Each controller is started with its own cache
+// whose lifecycle is coupled to the controller. The controller is started with
+// the supplied options, and configured with the supplied watches. Start does
+// not block.
+func (e *Engine) Start(name string, o controller.Options, w ...Watch) error {
+	c, err := e.Create(name, o, w...)
+	if err != nil {
+		return err
+	}
+	return c.Start(context.Background())
+}
+
+// NamedController is a controller that's not yet started. It gives access to
+// the underlying cache, which may be used e.g. to add indexes.
+type NamedController interface {
+	Start(ctx context.Context) error
+	GetCache() cache.Cache
+}
+
+type namedController struct {
+	name string
+	e    *Engine
+	ca   cache.Cache
+	ctrl controller.Controller
+}
+
+// Create the named controller. Each controller gets its own cache
+// whose lifecycle is coupled to the controller. The controller is created with
+// the supplied options, and configured with the supplied watches. It is not
+// started yet.
+func (e *Engine) Create(name string, o controller.Options, w ...Watch) (NamedController, error) {
+	// Each controller gets its own cache for the GVKs it owns. This cache is
+	// wrapped by a GVKRoutedCache that routes requests to other GVKs to the
+	// manager's cache. This way we can share informers for composed resources
+	// (that's where this is primarily used) with other controllers, but get
+	// control over the lifecycle of the owned GVKs' informers.
+	ca, err := e.newCache(e.mgr.GetConfig(), cache.Options{Scheme: e.mgr.GetScheme(), Mapper: e.mgr.GetRESTMapper()})
+	if err != nil {
+		return nil, errors.Wrap(err, errCreateCache)
+	}
+
+	// Wrap the existing manager to use our cache for the GVKs of this controller.
+	rc := NewGVKRoutedCache(e.mgr.GetScheme(), e.mgr.GetCache())
+	rm := &routedManager{
+		Manager: e.mgr,
+		client: &cachedRoutedClient{
+			Client: e.mgr.GetClient(),
+			scheme: e.mgr.GetScheme(),
+			cache:  rc,
+		},
+		cache: rc,
+	}
+
+	ctrl, err := e.newCtrl(name, rm, o)
+	if err != nil {
+		return nil, errors.Wrap(err, errCreateController)
+	}
+
+	for _, wt := range w {
+		if wt.customSource != nil {
+			if err := ctrl.Watch(wt.customSource, wt.handler, wt.predicates...); err != nil {
+				return nil, errors.Wrap(err, errWatch)
+			}
+			continue
+		}
+
+		// route cache and client (read) requests to our cache for this GVK.
+		gvk, err := apiutil.GVKForObject(wt.kind, e.mgr.GetScheme())
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get GVK for type %T", wt.kind)
+		}
+		rc.AddDelegate(gvk, ca)
+
+		if err := ctrl.Watch(source.Kind(ca, wt.kind), wt.handler, wt.predicates...); err != nil {
+			return nil, errors.Wrap(err, errWatch)
+		}
+	}
+
+	return &namedController{name: name, e: e, ca: ca, ctrl: ctrl}, nil
+}
+
+// Start the named controller. Start does not block.
+func (c *namedController) Start(ctx context.Context) error { + if c.e.IsRunning(c.name) { + return nil + } + + ctx, stop := context.WithCancel(ctx) + c.e.mx.Lock() + c.e.started[c.name] = stop + c.e.errors[c.name] = nil + c.e.mx.Unlock() + + go func() { + <-c.e.mgr.Elected() + c.e.done(c.name, errors.Wrap(c.ca.Start(ctx), errCrashCache)) + }() + go func() { + <-c.e.mgr.Elected() + if synced := c.ca.WaitForCacheSync(ctx); !synced { + c.e.done(c.name, errors.New(errCrashCache)) + return + } + c.e.done(c.name, errors.Wrap(c.ctrl.Start(ctx), errCrashController)) + }() + + return nil +} + +// GetCache returns the cache used by the named controller. +func (c *namedController) GetCache() cache.Cache { + return c.ca +} diff --git a/internal/controller/engine/engine_test.go b/internal/controller/engine/engine_test.go new file mode 100644 index 000000000..d49ac2a09 --- /dev/null +++ b/internal/controller/engine/engine_test.go @@ -0,0 +1,223 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/resource/fake" + "github.com/crossplane/crossplane-runtime/pkg/test" +) + +type MockCache struct { + cache.Cache + + MockStart func(stop context.Context) error +} + +func (c *MockCache) Start(stop context.Context) error { + return c.MockStart(stop) +} + +func (c *MockCache) WaitForCacheSync(_ context.Context) bool { + return true +} + +type MockController struct { + controller.Controller + + MockStart func(stop context.Context) error + MockWatch func(s source.Source, h handler.EventHandler, p ...predicate.Predicate) error +} + +func (c *MockController) Start(stop context.Context) error { + return c.MockStart(stop) +} + +func (c *MockController) Watch(s source.Source, h handler.EventHandler, p ...predicate.Predicate) error { + return c.MockWatch(s, h, p...) 
+} + +func TestEngine(t *testing.T) { + errBoom := errors.New("boom") + + type args struct { + name string + o controller.Options + w []Watch + } + type want struct { + err error + crash error + } + cases := map[string]struct { + reason string + e *Engine + args args + want want + }{ + "NewCacheError": { + reason: "Errors creating a new cache should be returned", + e: NewEngine(&fake.Manager{}, + WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, errBoom }), + ), + args: args{ + name: "coolcontroller", + }, + want: want{ + err: errors.Wrap(errBoom, errCreateCache), + }, + }, + "NewControllerError": { + reason: "Errors creating a new controller should be returned", + e: NewEngine( + &fake.Manager{ + Scheme: runtime.NewScheme(), + Cache: &MockCache{}, + }, + WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }), + WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { return nil, errBoom }), + ), + args: args{ + name: "coolcontroller", + }, + want: want{ + err: errors.Wrap(errBoom, errCreateController), + }, + }, + "WatchError": { + reason: "Errors adding a watch should be returned", + e: NewEngine( + &fake.Manager{ + Scheme: runtime.NewScheme(), + Cache: &MockCache{}, + }, + WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }), + WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { + c := &MockController{MockWatch: func(source.Source, handler.EventHandler, ...predicate.Predicate) error { return errBoom }} + return c, nil + }), + ), + args: args{ + name: "coolcontroller", + w: []Watch{For(&unstructured.Unstructured{ + Object: map[string]interface{}{"apiVersion": "example.org/v1", "kind": "Thing"}, + }, nil)}, + }, + want: want{ + err: errors.Wrap(errBoom, errWatch), + }, + }, + "SchemeError": { + reason: "Passing an object of unknown GVK", + e: NewEngine( + &fake.Manager{ + Scheme: runtime.NewScheme(), + Cache: &MockCache{}, + }, + WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }), + WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { + c := &MockController{MockWatch: func(source.Source, handler.EventHandler, ...predicate.Predicate) error { return errBoom }} + return c, nil + }), + ), + args: args{ + name: "coolcontroller", + w: []Watch{For(&unstructured.Unstructured{}, nil)}, + }, + want: want{ + err: errors.Wrap(runtime.NewMissingKindErr("unstructured object has no kind"), "failed to get GVK for type *unstructured.Unstructured"), + }, + }, + "CacheCrashError": { + reason: "Errors starting or running a cache should be returned", + e: NewEngine(&fake.Manager{}, + WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { + c := &MockCache{MockStart: func(_ context.Context) error { return errBoom }} + return c, nil + }), + WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { + c := &MockController{MockStart: func(_ context.Context) error { + return nil + }} + return c, nil + }), + ), + args: args{ + name: "coolcontroller", + }, + want: want{ + crash: errors.Wrap(errBoom, errCrashCache), + }, + }, + "ControllerCrashError": { + reason: "Errors starting or running a controller should be returned", + e: NewEngine(&fake.Manager{}, + WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { + c := &MockCache{MockStart: func(_ 
 context.Context) error {
+						return nil
+					}}
+					return c, nil
+				}),
+				WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) {
+					c := &MockController{MockStart: func(_ context.Context) error {
+						return errBoom
+					}}
+					return c, nil
+				}),
+			),
+			args: args{
+				name: "coolcontroller",
+			},
+			want: want{
+				crash: errors.Wrap(errBoom, errCrashController),
+			},
+		},
+	}
+
+	for name, tc := range cases {
+		t.Run(name, func(t *testing.T) {
+			err := tc.e.Start(tc.args.name, tc.args.o, tc.args.w...)
+			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
+				t.Errorf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff)
+			}
+
+			// Give the goroutines a little time to return an error. If this
+			// becomes flaky or time consuming we could use a ticker instead.
+			time.Sleep(100 * time.Millisecond)
+
+			tc.e.Stop(tc.args.name)
+			if diff := cmp.Diff(tc.want.crash, tc.e.Err(tc.args.name), test.EquateErrors()); diff != "" {
+				t.Errorf("\n%s\ne.Err(...): -want error, +got error:\n%s", tc.reason, diff)
+			}
+		})
+	}
+}

From 697dc711546efcd3eb140656cc009102d25ca42c Mon Sep 17 00:00:00 2001
From: Nic Cope 
Date: Mon, 6 May 2024 13:18:28 -0700
Subject: [PATCH 202/370] Rename the controller package to engine, rename types to not stutter

Signed-off-by: Nic Cope 
---
 internal/controller/engine/cache.go       |  2 +-
 internal/controller/engine/engine.go      | 62 ++++++++++++-----------
 internal/controller/engine/engine_test.go | 20 ++++----
 3 files changed, 43 insertions(+), 41 deletions(-)

diff --git a/internal/controller/engine/cache.go b/internal/controller/engine/cache.go
index fa65bf54e..b8264b9e8 100644
--- a/internal/controller/engine/cache.go
+++ b/internal/controller/engine/cache.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package controller
+package engine
 
 import (
 	"context"
diff --git a/internal/controller/engine/engine.go b/internal/controller/engine/engine.go
index 4552948dc..4707d9576 100644
--- a/internal/controller/engine/engine.go
+++ b/internal/controller/engine/engine.go
@@ -14,8 +14,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Package controller provides utilties for working with controllers.
-package controller
+// Package engine provides utilities for working with controllers.
+package engine
 
 import (
 	"context"
@@ -57,10 +57,11 @@ var (
 	DefaultNewControllerFn NewControllerFn = controller.NewUnmanaged
 )
 
-// An Engine manages the lifecycles of controller-runtime controllers (and their
-// caches). The lifecycles of the controllers are not coupled to lifecycle of
-// the engine, nor to the lifecycle of the controller manager it uses.
-type Engine struct {
+// A ControllerEngine manages the lifecycles of controller-runtime controllers
+// (and their caches). The lifecycles of the controllers are not coupled to the
+// lifecycle of the engine, nor to the lifecycle of the controller manager it
+// uses.
+type ControllerEngine struct {
 	mgr manager.Manager
 
 	started map[string]context.CancelFunc
@@ -71,28 +72,28 @@ type Engine struct {
 	newCtrl NewControllerFn
 }
 
-// An EngineOption configures an Engine.
-type EngineOption func(*Engine)
+// A ControllerEngineOption configures a ControllerEngine.
+type ControllerEngineOption func(*ControllerEngine)
 
 // WithNewCacheFn may be used to configure a different cache implementation.
 // DefaultNewCacheFn is used by default.
-func WithNewCacheFn(fn NewCacheFn) EngineOption { - return func(e *Engine) { +func WithNewCacheFn(fn NewCacheFn) ControllerEngineOption { + return func(e *ControllerEngine) { e.newCache = fn } } // WithNewControllerFn may be used to configure a different controller // implementation. DefaultNewControllerFn is used by default. -func WithNewControllerFn(fn NewControllerFn) EngineOption { - return func(e *Engine) { +func WithNewControllerFn(fn NewControllerFn) ControllerEngineOption { + return func(e *ControllerEngine) { e.newCtrl = fn } } -// NewEngine produces a new Engine. -func NewEngine(mgr manager.Manager, o ...EngineOption) *Engine { - e := &Engine{ +// New produces a new ControllerEngine. +func New(mgr manager.Manager, o ...ControllerEngineOption) *ControllerEngine { + e := &ControllerEngine{ mgr: mgr, started: make(map[string]context.CancelFunc), @@ -111,7 +112,7 @@ func NewEngine(mgr manager.Manager, o ...EngineOption) *Engine { // IsRunning indicates whether the named controller is running - i.e. whether it // has been started and does not appear to have crashed. -func (e *Engine) IsRunning(name string) bool { +func (e *ControllerEngine) IsRunning(name string) bool { e.mx.RLock() defer e.mx.RUnlock() @@ -121,7 +122,7 @@ func (e *Engine) IsRunning(name string) bool { // Err returns any error encountered by the named controller. The returned error // is always nil if the named controller is running. -func (e *Engine) Err(name string) error { +func (e *ControllerEngine) Err(name string) error { e.mx.RLock() defer e.mx.RUnlock() @@ -129,11 +130,11 @@ func (e *Engine) Err(name string) error { } // Stop the named controller. -func (e *Engine) Stop(name string) { +func (e *ControllerEngine) Stop(name string) { e.done(name, nil) } -func (e *Engine) done(name string, err error) { +func (e *ControllerEngine) done(name string, err error) { e.mx.Lock() defer e.mx.Unlock() @@ -160,17 +161,18 @@ type Watch struct { predicates []predicate.Predicate } -// For returns a Watch for the supplied kind of object. Events will be handled -// by the supplied EventHandler, and may be filtered by the supplied predicates. -func For(kind client.Object, h handler.EventHandler, p ...predicate.Predicate) Watch { +// WatchFor returns a Watch for the supplied kind of object. Events will be +// handled by the supplied EventHandler, and may be filtered by the supplied +// predicates. +func WatchFor(kind client.Object, h handler.EventHandler, p ...predicate.Predicate) Watch { return Watch{kind: kind, handler: h, predicates: p} } -// TriggeredBy returns a custom watch for secondary resources triggering the -// controller. source.Kind can be used to create a source for a secondary cache. -// Events will be handled by the supplied EventHandler, and may be filtered by -// the supplied predicates. -func TriggeredBy(source source.Source, h handler.EventHandler, p ...predicate.Predicate) Watch { +// WatchTriggeredBy returns a custom watch for secondary resources triggering +// the controller. source.Kind can be used to create a source for a secondary +// cache. Events will be handled by the supplied EventHandler, and may be +// filtered by the supplied predicates. +func WatchTriggeredBy(source source.Source, h handler.EventHandler, p ...predicate.Predicate) Watch { return Watch{customSource: source, handler: h, predicates: p} } @@ -178,7 +180,7 @@ func TriggeredBy(source source.Source, h handler.EventHandler, p ...predicate.Pr // whose lifecycle is coupled to the controller. 
The controller is started with // the supplied options, and configured with the supplied watches. Start does // not block. -func (e *Engine) Start(name string, o controller.Options, w ...Watch) error { +func (e *ControllerEngine) Start(name string, o controller.Options, w ...Watch) error { c, err := e.Create(name, o, w...) if err != nil { return err @@ -195,7 +197,7 @@ type NamedController interface { type namedController struct { name string - e *Engine + e *ControllerEngine ca cache.Cache ctrl controller.Controller } @@ -204,7 +206,7 @@ type namedController struct { // whose lifecycle is coupled to the controller. The controller is created with // the supplied options, and configured with the supplied watches. It is not // started yet. -func (e *Engine) Create(name string, o controller.Options, w ...Watch) (NamedController, error) { +func (e *ControllerEngine) Create(name string, o controller.Options, w ...Watch) (NamedController, error) { // Each controller gets its own cache for the GVKs it owns. This cache is // wrapped by a GVKRoutedCache that routes requests to other GVKs to the // manager's cache. This way we can share informers for composed resources diff --git a/internal/controller/engine/engine_test.go b/internal/controller/engine/engine_test.go index d49ac2a09..f68eb8f92 100644 --- a/internal/controller/engine/engine_test.go +++ b/internal/controller/engine/engine_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controller +package engine import ( "context" @@ -80,13 +80,13 @@ func TestEngine(t *testing.T) { } cases := map[string]struct { reason string - e *Engine + e *ControllerEngine args args want want }{ "NewCacheError": { reason: "Errors creating a new cache should be returned", - e: NewEngine(&fake.Manager{}, + e: New(&fake.Manager{}, WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, errBoom }), ), args: args{ @@ -98,7 +98,7 @@ func TestEngine(t *testing.T) { }, "NewControllerError": { reason: "Errors creating a new controller should be returned", - e: NewEngine( + e: New( &fake.Manager{ Scheme: runtime.NewScheme(), Cache: &MockCache{}, @@ -115,7 +115,7 @@ func TestEngine(t *testing.T) { }, "WatchError": { reason: "Errors adding a watch should be returned", - e: NewEngine( + e: New( &fake.Manager{ Scheme: runtime.NewScheme(), Cache: &MockCache{}, @@ -128,7 +128,7 @@ func TestEngine(t *testing.T) { ), args: args{ name: "coolcontroller", - w: []Watch{For(&unstructured.Unstructured{ + w: []Watch{WatchFor(&unstructured.Unstructured{ Object: map[string]interface{}{"apiVersion": "example.org/v1", "kind": "Thing"}, }, nil)}, }, @@ -138,7 +138,7 @@ func TestEngine(t *testing.T) { }, "SchemeError": { reason: "Passing an object of unknown GVK", - e: NewEngine( + e: New( &fake.Manager{ Scheme: runtime.NewScheme(), Cache: &MockCache{}, @@ -151,7 +151,7 @@ func TestEngine(t *testing.T) { ), args: args{ name: "coolcontroller", - w: []Watch{For(&unstructured.Unstructured{}, nil)}, + w: []Watch{WatchFor(&unstructured.Unstructured{}, nil)}, }, want: want{ err: errors.Wrap(runtime.NewMissingKindErr("unstructured object has no kind"), "failed to get GVK for type *unstructured.Unstructured"), @@ -159,7 +159,7 @@ func TestEngine(t *testing.T) { }, "CacheCrashError": { reason: "Errors starting or running a cache should be returned", - e: NewEngine(&fake.Manager{}, + e: New(&fake.Manager{}, WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { c := 
&MockCache{MockStart: func(_ context.Context) error { return errBoom }} return c, nil @@ -180,7 +180,7 @@ func TestEngine(t *testing.T) { }, "ControllerCrashError": { reason: "Errors starting or running a controller should be returned", - e: NewEngine(&fake.Manager{}, + e: New(&fake.Manager{}, WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { c := &MockCache{MockStart: func(_ context.Context) error { return nil From bb6cbc85644f7fc135b301bea33ac674fbf69fef Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 6 May 2024 13:45:16 -0700 Subject: [PATCH 203/370] Use the c/c controller engine and GVK routed cache, not c/cr Signed-off-by: Nic Cope --- .../apiextensions/definition/composed.go | 4 ++-- .../apiextensions/definition/reconciler.go | 19 ++++++++++--------- .../definition/reconciler_test.go | 17 +++++++++-------- .../apiextensions/offered/reconciler.go | 9 +++++---- .../apiextensions/offered/reconciler_test.go | 14 ++++++++------ 5 files changed, 34 insertions(+), 29 deletions(-) diff --git a/internal/controller/apiextensions/definition/composed.go b/internal/controller/apiextensions/definition/composed.go index 3abd6c1b0..9084529b9 100644 --- a/internal/controller/apiextensions/definition/composed.go +++ b/internal/controller/apiextensions/definition/composed.go @@ -35,9 +35,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" - "github.com/crossplane/crossplane-runtime/pkg/controller" "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane/internal/controller/engine" "github.com/crossplane/crossplane/internal/xcrd" ) @@ -52,7 +52,7 @@ type composedResourceInformers struct { log logging.Logger cluster cluster.Cluster - gvkRoutedCache *controller.GVKRoutedCache + gvkRoutedCache *engine.GVKRoutedCache lock sync.RWMutex // everything below is protected by this lock diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 2fa6f0470..aeefdd200 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -58,6 +58,7 @@ import ( "github.com/crossplane/crossplane/apis/secrets/v1alpha1" "github.com/crossplane/crossplane/internal/controller/apiextensions/composite" apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" + "github.com/crossplane/crossplane/internal/controller/engine" "github.com/crossplane/crossplane/internal/features" "github.com/crossplane/crossplane/internal/xcrd" ) @@ -98,8 +99,8 @@ const ( // A ControllerEngine can start and stop Kubernetes controllers on demand. 
type ControllerEngine interface { IsRunning(name string) bool - Create(name string, o kcontroller.Options, w ...controller.Watch) (controller.NamedController, error) - Start(name string, o kcontroller.Options, w ...controller.Watch) error + Create(name string, o kcontroller.Options, w ...engine.Watch) (engine.NamedController, error) + Start(name string, o kcontroller.Options, w ...engine.Watch) error Stop(name string) Err(name string) error } @@ -219,7 +220,7 @@ type definition struct { func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { kube := unstructured.NewClient(mgr.GetClient()) - ca := controller.NewGVKRoutedCache(mgr.GetScheme(), mgr.GetCache()) + ca := engine.NewGVKRoutedCache(mgr.GetScheme(), mgr.GetCache()) r := &Reconciler{ mgr: mgr, @@ -231,7 +232,7 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { composite: definition{ CRDRenderer: CRDRenderFn(xcrd.ForCompositeResource), - ControllerEngine: controller.NewEngine(mgr), + ControllerEngine: engine.New(mgr), Finalizer: resource.NewAPIFinalizer(kube, finalizer), }, @@ -259,7 +260,7 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { // wrap the manager's cache to route requests to dynamically started // informers for managed resources. - r.mgr = controller.WithGVKRoutedCache(ca, mgr) + r.mgr = engine.WithGVKRoutedCache(ca, mgr) } return r @@ -489,16 +490,16 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco name := composite.ControllerName(d.GetName()) var ca cache.Cache - watches := []controller.Watch{ - controller.For(u, &handler.EnqueueRequestForObject{}), + watches := []engine.Watch{ + engine.WatchFor(u, &handler.EnqueueRequestForObject{}), // enqueue composites whenever a matching CompositionRevision is created - controller.TriggeredBy(source.Kind(r.mgr.GetCache(), &v1.CompositionRevision{}), handler.Funcs{ + engine.WatchTriggeredBy(source.Kind(r.mgr.GetCache(), &v1.CompositionRevision{}), handler.Funcs{ CreateFunc: composite.EnqueueForCompositionRevisionFunc(ck, r.mgr.GetCache().List, r.log), }), } if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { // enqueue XRs that when a relevant MR is updated - watches = append(watches, controller.TriggeredBy(&r.xrInformers, handler.Funcs{ + watches = append(watches, engine.WatchTriggeredBy(&r.xrInformers, handler.Funcs{ UpdateFunc: func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { enqueueXRsForMR(ca, xrGVK, log)(ctx, ev, q) }, diff --git a/internal/controller/apiextensions/definition/reconciler_test.go b/internal/controller/apiextensions/definition/reconciler_test.go index 78a5ef4f7..e89f2322e 100644 --- a/internal/controller/apiextensions/definition/reconciler_test.go +++ b/internal/controller/apiextensions/definition/reconciler_test.go @@ -46,14 +46,15 @@ import ( v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" + "github.com/crossplane/crossplane/internal/controller/engine" "github.com/crossplane/crossplane/internal/features" ) type MockEngine struct { ControllerEngine MockIsRunning func(name string) bool - MockCreate func(name string, o kcontroller.Options, w ...controller.Watch) (controller.NamedController, error) - MockStart func(name string, o kcontroller.Options, w ...controller.Watch) error + MockCreate 
func(name string, o kcontroller.Options, w ...engine.Watch) (engine.NamedController, error) + MockStart func(name string, o kcontroller.Options, w ...engine.Watch) error MockStop func(name string) MockErr func(name string) error } @@ -62,11 +63,11 @@ func (m *MockEngine) IsRunning(name string) bool { return m.MockIsRunning(name) } -func (m *MockEngine) Create(name string, o kcontroller.Options, w ...controller.Watch) (controller.NamedController, error) { +func (m *MockEngine) Create(name string, o kcontroller.Options, w ...engine.Watch) (engine.NamedController, error) { return m.MockCreate(name, o, w...) } -func (m *MockEngine) Start(name string, o kcontroller.Options, w ...controller.Watch) error { +func (m *MockEngine) Start(name string, o kcontroller.Options, w ...engine.Watch) error { return m.MockStart(name, o, w...) } @@ -574,7 +575,7 @@ func TestReconcile(t *testing.T) { WithControllerEngine(&MockEngine{ MockIsRunning: func(_ string) bool { return false }, MockErr: func(_ string) error { return nil }, - MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { + MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { return nil, errBoom }, }), @@ -635,7 +636,7 @@ func TestReconcile(t *testing.T) { WithControllerEngine(&MockEngine{ MockIsRunning: func(_ string) bool { return false }, MockErr: func(_ string) error { return errBoom }, // This error should only be logged. - MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { + MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { return mockNamedController{ MockStart: func(_ context.Context) error { return nil }, MockGetCache: func() cache.Cache { @@ -719,7 +720,7 @@ func TestReconcile(t *testing.T) { }}), WithControllerEngine(&MockEngine{ MockErr: func(_ string) error { return nil }, - MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { + MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { return mockNamedController{ MockStart: func(_ context.Context) error { return nil }, MockGetCache: func() cache.Cache { @@ -792,7 +793,7 @@ func TestReconcile(t *testing.T) { WithControllerEngine(&MockEngine{ MockIsRunning: func(_ string) bool { return true }, MockErr: func(_ string) error { return errBoom }, // This error should only be logged. 
- MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { + MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { t.Errorf("MockCreate should not be called") return nil, nil }, diff --git a/internal/controller/apiextensions/offered/reconciler.go b/internal/controller/apiextensions/offered/reconciler.go index 2111294d9..1c5d0e2a9 100644 --- a/internal/controller/apiextensions/offered/reconciler.go +++ b/internal/controller/apiextensions/offered/reconciler.go @@ -50,6 +50,7 @@ import ( secretsv1alpha1 "github.com/crossplane/crossplane/apis/secrets/v1alpha1" "github.com/crossplane/crossplane/internal/controller/apiextensions/claim" apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" + "github.com/crossplane/crossplane/internal/controller/engine" "github.com/crossplane/crossplane/internal/features" "github.com/crossplane/crossplane/internal/names" "github.com/crossplane/crossplane/internal/xcrd" @@ -91,7 +92,7 @@ const ( // A ControllerEngine can start and stop Kubernetes controllers on demand. type ControllerEngine interface { IsRunning(name string) bool - Start(name string, o kcontroller.Options, w ...controller.Watch) error + Start(name string, o kcontroller.Options, w ...engine.Watch) error Stop(name string) Err(name string) error } @@ -202,7 +203,7 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { claim: definition{ CRDRenderer: CRDRenderFn(xcrd.ForCompositeResourceClaim), - ControllerEngine: controller.NewEngine(mgr), + ControllerEngine: engine.New(mgr), Finalizer: resource.NewAPIFinalizer(kube, finalizer), }, @@ -453,8 +454,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco cp.SetGroupVersionKind(d.GetCompositeGroupVersionKind()) if err := r.claim.Start(claim.ControllerName(d.GetName()), ko, - controller.For(cm, &handler.EnqueueRequestForObject{}), - controller.For(cp, &EnqueueRequestForClaim{}), + engine.WatchFor(cm, &handler.EnqueueRequestForObject{}), + engine.WatchFor(cp, &EnqueueRequestForClaim{}), ); err != nil { err = errors.Wrap(err, errStartController) r.record.Event(d, event.Warning(reasonOfferXRC, err)) diff --git a/internal/controller/apiextensions/offered/reconciler_test.go b/internal/controller/apiextensions/offered/reconciler_test.go index 59166a641..1be2ad2ad 100644 --- a/internal/controller/apiextensions/offered/reconciler_test.go +++ b/internal/controller/apiextensions/offered/reconciler_test.go @@ -34,7 +34,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/crossplane/crossplane-runtime/pkg/controller" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/resource" @@ -42,16 +41,19 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + "github.com/crossplane/crossplane/internal/controller/engine" ) +var _ ControllerEngine = &MockEngine{} + type MockEngine struct { ControllerEngine - MockStart func(name string, o kcontroller.Options, w ...controller.Watch) error + MockStart func(name string, o kcontroller.Options, w ...engine.Watch) error MockStop func(name string) MockErr func(name string) error } -func (m *MockEngine) Start(name string, o kcontroller.Options, w ...controller.Watch) error { +func (m *MockEngine) 
Start(name string, o kcontroller.Options, w ...engine.Watch) error { return m.MockStart(name, o, w...) } @@ -552,7 +554,7 @@ func TestReconcile(t *testing.T) { }}), WithControllerEngine(&MockEngine{ MockErr: func(_ string) error { return nil }, - MockStart: func(_ string, _ kcontroller.Options, _ ...controller.Watch) error { return errBoom }, + MockStart: func(_ string, _ kcontroller.Options, _ ...engine.Watch) error { return errBoom }, }), }, }, @@ -596,7 +598,7 @@ func TestReconcile(t *testing.T) { }}), WithControllerEngine(&MockEngine{ MockErr: func(_ string) error { return errBoom }, // This error should only be logged. - MockStart: func(_ string, _ kcontroller.Options, _ ...controller.Watch) error { return nil }, + MockStart: func(_ string, _ kcontroller.Options, _ ...engine.Watch) error { return nil }, }, ), }, @@ -656,7 +658,7 @@ func TestReconcile(t *testing.T) { }}), WithControllerEngine(&MockEngine{ MockErr: func(_ string) error { return nil }, - MockStart: func(_ string, _ kcontroller.Options, _ ...controller.Watch) error { return nil }, + MockStart: func(_ string, _ kcontroller.Options, _ ...engine.Watch) error { return nil }, MockStop: func(_ string) {}, }), }, From 89ce6c71b9cbfff34678749c497fa5d5c6fd0645 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 6 May 2024 14:48:11 -0700 Subject: [PATCH 204/370] Bump to latest runtime and kubernetes deps This bumps crossplane-runtime, controller-runtime, and k8s.io dependencies to latest. Per the below PR, the latest crossplane-runtime doesn't have the controller engine anymore. It moved into c/c. https://github.com/crossplane/crossplane-runtime/pull/689 Signed-off-by: Nic Cope --- .golangci.yml | 5 + ...plane.io_compositeresourcedefinitions.yaml | 5 + ....crossplane.io_configurationrevisions.yaml | 5 + .../pkg.crossplane.io_controllerconfigs.yaml | 183 ++++++-- ...rossplane.io_deploymentruntimeconfigs.yaml | 392 ++++++++++++++++-- .../pkg.crossplane.io_functionrevisions.yaml | 5 + .../pkg.crossplane.io_providerrevisions.yaml | 5 + .../meta.pkg.crossplane.io_providers.yaml | 10 + go.mod | 71 ++-- go.sum | 187 ++++----- .../apiextensions/composite/reconciler.go | 10 +- .../composite/reconciler_test.go | 20 +- .../apiextensions/definition/composed.go | 12 +- .../apiextensions/definition/indexes.go | 46 +- .../apiextensions/definition/reconciler.go | 30 +- internal/controller/engine/engine.go | 12 +- internal/controller/engine/engine_test.go | 12 +- 17 files changed, 751 insertions(+), 259 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c62817d9a..e0e797139 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -103,6 +103,10 @@ linters: # to communicate what the bool means. - nonamedreturns + # TODO(negz): We do want these as of Go v1.22. + - copyloopvar + - intrange + linters-settings: errcheck: # report about not checking of errors in type assetions: `a := b.(MyStruct)`; @@ -224,6 +228,7 @@ issues: # Excluding generated files. exclude-files: - "zz_generated\\..+\\.go$" + - ".+\\.pb.go$" # Excluding configuration per-path and per-linter. exclude-rules: # Exclude some linters from running on tests files. 
diff --git a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml index 99c507489..6b6af162d 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml @@ -79,6 +79,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic kind: description: |- kind is the serialized kind of the resource. It is normally CamelCase and singular. @@ -103,6 +104,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic singular: description: singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. @@ -222,6 +224,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - conversionReviewVersions type: object @@ -312,6 +315,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic kind: description: |- kind is the serialized kind of the resource. It is normally CamelCase and singular. @@ -336,6 +340,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic singular: description: singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. diff --git a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml index 7fb52c93e..bd101a2e8 100644 --- a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml @@ -242,6 +242,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic nonResourceURLs: description: |- NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path @@ -250,6 +251,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resourceNames: description: ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything @@ -257,18 +259,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resources: description: Resources is a list of resources this rule applies to. '*' represents all resources. items: type: string type: array + x-kubernetes-list-type: atomic verbs: description: Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. items: type: string type: array + x-kubernetes-list-type: atomic required: - verbs type: object diff --git a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml index 3cee9ece8..cf23a2caf 100644 --- a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml @@ -111,11 +111,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. 
@@ -143,11 +145,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -160,6 +164,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -204,11 +209,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -236,14 +243,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -304,11 +314,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -323,12 +335,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -338,12 +350,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -384,11 +396,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -408,6 +422,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -430,6 +445,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -479,11 +495,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -498,12 +516,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -513,12 +531,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -558,11 +576,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -582,6 +602,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -594,6 +615,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules (e.g. @@ -651,11 +673,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -670,12 +694,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -685,12 +709,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -731,11 +755,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -755,6 +781,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -777,6 +804,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the anti-affinity requirements specified by this field are not met at @@ -826,11 +854,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -845,12 +875,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -860,12 +890,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -905,11 +935,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -929,6 +961,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -941,6 +974,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object args: @@ -1195,6 +1229,29 @@ spec: PodSecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object fsGroup: description: |- A special supplemental group that applies to all containers in a pod. @@ -1314,6 +1371,7 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic sysctls: description: |- Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported @@ -1333,6 +1391,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: description: |- The Windows specific settings applied to all containers. @@ -1505,6 +1564,30 @@ spec: 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: description: |- The capabilities to add/drop when running containers. @@ -1517,12 +1600,14 @@ spec: description: Capability represent POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: description: Capability represent POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: description: |- @@ -1730,6 +1815,8 @@ spec: to container and the other way around. 
When not set, MountPropagationNone is used. This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name of a Volume. @@ -1739,6 +1826,29 @@ spec: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + + If ReadOnly is false, this field has no meaning and must be unspecified. + + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: description: |- Path within the volume from which the container's volume should be mounted. @@ -1867,6 +1977,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /' @@ -1994,6 +2105,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: description: |- Name of the referent. @@ -2077,8 +2189,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of the pod: - only annotations, labels, name and namespace are - supported.' + only annotations, labels, name, namespace and uid + are supported.' properties: apiVersion: description: Version of the schema the FieldPath @@ -2137,6 +2249,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: description: |- @@ -2259,6 +2372,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: description: |- dataSource field can be used to specify either: @@ -2403,11 +2517,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -2435,7 +2551,7 @@ spec: If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. 
type: string volumeMode: @@ -2479,6 +2595,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: description: |- wwids Optional: FC volume world wide identifiers (wwids) @@ -2486,6 +2603,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: description: |- @@ -2706,6 +2824,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: description: |- readOnly here will force the ReadOnly setting in VolumeMounts. @@ -2894,11 +3013,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -2977,6 +3098,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: description: |- Name of the referent. @@ -3004,7 +3126,7 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, labels, - name and namespace are supported.' + name, namespace and uid are supported.' properties: apiVersion: description: Version of the schema the @@ -3067,6 +3189,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data @@ -3110,6 +3233,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: description: |- Name of the referent. @@ -3153,6 +3277,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: description: quobyte represents a Quobyte mount on the host @@ -3223,6 +3348,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic pool: description: |- pool is the rados pool name. @@ -3377,6 +3503,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: description: optional field specify whether the Secret or its keys must be defined diff --git a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml index 7b80a697f..f876d0e55 100644 --- a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -152,11 +152,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -312,11 +314,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -345,11 +349,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -363,6 +369,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -408,11 +415,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. 
@@ -441,14 +450,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -514,11 +526,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -533,12 +547,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -548,12 +562,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -596,11 +610,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -620,6 +636,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -642,6 +659,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -693,11 +711,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -712,12 +732,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -727,12 +747,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -774,11 +794,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -798,6 +820,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -810,6 +833,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling @@ -871,11 +895,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -890,12 +916,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -905,12 +931,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -953,11 +979,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -977,6 +1005,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -999,6 +1028,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the anti-affinity requirements specified by this field are not met at @@ -1050,11 +1080,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1069,12 +1101,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -1084,12 +1116,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -1131,11 +1163,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1155,6 +1189,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -1167,6 +1202,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -1197,6 +1233,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: description: |- Entrypoint array. Not executed within a shell. @@ -1210,6 +1247,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic env: description: |- List of environment variables to set in the container. @@ -1334,6 +1372,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: description: |- List of sources to populate environment variables in the container. @@ -1383,6 +1424,7 @@ spec: x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: description: |- Container image name. @@ -1424,6 +1466,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -1457,6 +1500,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -1541,6 +1585,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -1574,6 +1619,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -1654,6 +1700,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -1714,6 +1761,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -1873,6 +1921,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -1933,6 +1982,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -2130,6 +2180,30 @@ spec: 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. 
+ type: string + required: + - type + type: object capabilities: description: |- The capabilities to add/drop when running containers. @@ -2143,6 +2217,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -2150,6 +2225,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: description: |- @@ -2309,6 +2385,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -2369,6 +2446,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -2512,6 +2590,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: description: |- Pod volumes to mount into the container's filesystem. @@ -2531,6 +2612,8 @@ spec: to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name @@ -2541,6 +2624,29 @@ spec: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + + If ReadOnly is false, this field has no meaning and must be unspecified. + + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: description: |- Path within the volume from which the container's volume should be mounted. @@ -2558,6 +2664,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: description: |- Container's working directory. @@ -2569,6 +2678,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map dnsConfig: description: |- Specifies the DNS parameters of a pod. @@ -2583,6 +2695,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: description: |- A list of DNS resolver options. @@ -2600,6 +2713,7 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: description: |- A list of DNS search domains for host-name lookup. @@ -2608,6 +2722,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: description: |- @@ -2655,6 +2770,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: description: |- Entrypoint array. Not executed within a shell. 
@@ -2668,6 +2784,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic env: description: |- List of environment variables to set in the container. @@ -2792,6 +2909,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: description: |- List of sources to populate environment variables in the container. @@ -2841,6 +2961,7 @@ spec: x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: description: |- Container image name. @@ -2879,6 +3000,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -2912,6 +3034,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -2996,6 +3119,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -3029,6 +3153,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -3106,6 +3231,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -3166,6 +3292,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -3315,6 +3442,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -3375,6 +3503,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -3559,6 +3688,30 @@ spec: 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: description: |- The capabilities to add/drop when running containers. @@ -3572,6 +3725,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -3579,6 +3733,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: description: |- @@ -3732,6 +3887,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -3792,6 +3948,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. 
@@ -3945,6 +4102,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: description: |- Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. @@ -3964,6 +4124,8 @@ spec: to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name @@ -3974,6 +4136,29 @@ spec: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + + If ReadOnly is false, this field has no meaning and must be unspecified. + + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: description: |- Path within the volume from which the container's volume should be mounted. @@ -3991,6 +4176,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: description: |- Container's working directory. @@ -4002,10 +4190,13 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map hostAliases: description: |- HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - file if specified. This is only valid for non-hostNetwork pods. + file if specified. items: description: |- HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the @@ -4016,11 +4207,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic ip: description: IP address of the host file entry. type: string type: object type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map hostIPC: description: |- Use the host's ipc namespace. @@ -4073,6 +4268,9 @@ spec: type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map initContainers: description: |- List of initialization containers belonging to the pod. @@ -4105,6 +4303,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: description: |- Entrypoint array. Not executed within a shell. @@ -4118,6 +4317,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic env: description: |- List of environment variables to set in the container. @@ -4242,6 +4442,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: description: |- List of sources to populate environment variables in the container. 
@@ -4291,6 +4494,7 @@ spec: x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: description: |- Container image name. @@ -4332,6 +4536,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -4365,6 +4570,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -4449,6 +4655,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -4482,6 +4689,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -4562,6 +4770,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -4622,6 +4831,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -4781,6 +4991,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -4841,6 +5052,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -5038,6 +5250,30 @@ spec: 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: description: |- The capabilities to add/drop when running containers. @@ -5051,6 +5287,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -5058,6 +5295,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: description: |- @@ -5217,6 +5455,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -5277,6 +5516,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -5420,6 +5660,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: description: |- Pod volumes to mount into the container's filesystem. @@ -5439,6 +5682,8 @@ spec: to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). 
type: string name: description: This must match the Name @@ -5449,6 +5694,29 @@ spec: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + + If ReadOnly is false, this field has no meaning and must be unspecified. + + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: description: |- Path within the volume from which the container's volume should be mounted. @@ -5466,6 +5734,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: description: |- Container's working directory. @@ -5477,6 +5748,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map nodeName: description: |- NodeName is a request to schedule this pod onto a specific node. If it is non-empty, @@ -5506,6 +5780,7 @@ spec: - spec.hostPID - spec.hostIPC - spec.hostUsers + - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup @@ -5515,6 +5790,7 @@ spec: - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities @@ -5594,6 +5870,7 @@ spec: - conditionType type: object type: array + x-kubernetes-list-type: atomic resourceClaims: description: |- ResourceClaims defines which ResourceClaims must be allocated @@ -5680,9 +5957,6 @@ spec: SchedulingGates can only be set at pod creation time, and be removed only afterwards. - - - This is a beta feature enabled by the PodSchedulingReadiness feature gate. items: description: PodSchedulingGate is associated to a Pod to guard its scheduling. @@ -5704,6 +5978,29 @@ spec: SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. 
+ RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object fsGroup: description: |- A special supplemental group that applies to all containers in a pod. @@ -5823,6 +6120,7 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic sysctls: description: |- Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported @@ -5843,6 +6141,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: description: |- The Windows specific settings applied to all containers. @@ -5878,7 +6177,7 @@ spec: type: object serviceAccount: description: |- - DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. type: string serviceAccountName: @@ -5958,6 +6257,7 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic topologySpreadConstraints: description: |- TopologySpreadConstraints describes how a group of pods ought to spread across topology @@ -6000,11 +6300,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -6075,9 +6377,6 @@ spec: In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - - - This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). format: int32 type: integer nodeAffinityPolicy: @@ -6264,6 +6563,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: description: 'path is Optional: Used as the mounted root, rather than the full @@ -6393,6 +6693,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: description: |- Name of the referent. @@ -6480,7 +6781,8 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, - labels, name and namespace are supported.' + labels, name, namespace and uid + are supported.' properties: apiVersion: description: Version of the schema @@ -6546,6 +6848,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: description: |- @@ -6668,6 +6971,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: description: |- dataSource field can be used to specify either: @@ -6815,11 +7119,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -6847,7 +7153,7 @@ spec: If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. 
type: string volumeMode: @@ -6893,6 +7199,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: description: |- wwids Optional: FC volume world wide identifiers (wwids) @@ -6900,6 +7207,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: description: |- @@ -7124,6 +7432,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: description: |- readOnly here will force the ReadOnly setting in VolumeMounts. @@ -7320,11 +7629,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -7405,6 +7716,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: description: |- Name of the referent. @@ -7435,8 +7747,8 @@ spec: description: 'Required: Selects a field of the pod: only annotations, - labels, name and namespace - are supported.' + labels, name, namespace + and uid are supported.' properties: apiVersion: description: Version @@ -7510,6 +7822,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about @@ -7554,6 +7867,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: description: |- Name of the referent. @@ -7599,6 +7913,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: description: quobyte represents a Quobyte mount @@ -7669,6 +7984,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic pool: description: |- pool is the rados pool name. @@ -7828,6 +8144,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: description: optional field specify whether the Secret or its keys must be defined @@ -7914,6 +8231,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map required: - containers type: object diff --git a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml index 7fb151816..d7ba776e2 100644 --- a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -290,6 +290,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic nonResourceURLs: description: |- NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path @@ -298,6 +299,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resourceNames: description: ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything @@ -305,18 +307,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resources: description: Resources is a list of resources this rule applies to. '*' represents all resources. items: type: string type: array + x-kubernetes-list-type: atomic verbs: description: Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - verbs type: object diff --git a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml index 961bfc454..439140a4c 100644 --- a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml @@ -285,6 +285,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic nonResourceURLs: description: |- NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path @@ -293,6 +294,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resourceNames: description: ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything @@ -300,18 +302,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resources: description: Resources is a list of resources this rule applies to. '*' represents all resources. items: type: string type: array + x-kubernetes-list-type: atomic verbs: description: Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. items: type: string type: array + x-kubernetes-list-type: atomic required: - verbs type: object diff --git a/cluster/meta/meta.pkg.crossplane.io_providers.yaml b/cluster/meta/meta.pkg.crossplane.io_providers.yaml index 69bf74a19..b7997e5a9 100644 --- a/cluster/meta/meta.pkg.crossplane.io_providers.yaml +++ b/cluster/meta/meta.pkg.crossplane.io_providers.yaml @@ -62,6 +62,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic nonResourceURLs: description: |- NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path @@ -70,6 +71,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resourceNames: description: ResourceNames is an optional white list of names that the rule applies to. An empty set means that @@ -77,12 +79,14 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resources: description: Resources is a list of resources this rule applies to. '*' represents all resources. items: type: string type: array + x-kubernetes-list-type: atomic verbs: description: Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents @@ -90,6 +94,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - verbs type: object @@ -186,6 +191,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic nonResourceURLs: description: |- NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path @@ -194,6 +200,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resourceNames: description: ResourceNames is an optional white list of names that the rule applies to. An empty set means that @@ -201,12 +208,14 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resources: description: Resources is a list of resources this rule applies to. '*' represents all resources. items: type: string type: array + x-kubernetes-list-type: atomic verbs: description: Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. 
'*' represents @@ -214,6 +223,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - verbs type: object diff --git a/go.mod b/go.mod index 228ae8752..5e0fcdc4c 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crossplane/crossplane -go 1.21 +go 1.22.0 toolchain go1.22.3 @@ -9,7 +9,7 @@ require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 github.com/Masterminds/semver v1.5.0 github.com/alecthomas/kong v0.8.1 - github.com/crossplane/crossplane-runtime v1.17.0-rc.0 + github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240520203451-fc036618ffd8 github.com/docker/docker v25.0.5+incompatible github.com/docker/go-connections v0.5.0 github.com/emicklei/dot v1.6.2 @@ -24,21 +24,21 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.11.0 github.com/upbound/up-sdk-go v0.1.1-0.20240122203953-2d00664aab8e - golang.org/x/sync v0.6.0 - google.golang.org/grpc v1.61.0 + golang.org/x/sync v0.7.0 + google.golang.org/grpc v1.63.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/protobuf v1.33.0 - k8s.io/api v0.29.1 - k8s.io/apiextensions-apiserver v0.29.1 - k8s.io/apimachinery v0.29.1 - k8s.io/apiserver v0.29.1 + k8s.io/api v0.30.0 + k8s.io/apiextensions-apiserver v0.30.0 + k8s.io/apimachinery v0.30.0 + k8s.io/apiserver v0.30.0 k8s.io/cli-runtime v0.29.1 - k8s.io/client-go v0.29.1 - k8s.io/code-generator v0.29.1 + k8s.io/client-go v0.30.0 + k8s.io/code-generator v0.30.0 k8s.io/kubectl v0.29.1 k8s.io/metrics v0.29.1 k8s.io/utils v0.0.0-20230726121419-3b25d923346b - sigs.k8s.io/controller-runtime v0.17.0 + sigs.k8s.io/controller-runtime v0.18.2 sigs.k8s.io/controller-tools v0.14.0 sigs.k8s.io/e2e-framework v0.3.0 sigs.k8s.io/kind v0.20.0 @@ -53,6 +53,7 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cloudflare/circl v1.3.7 // indirect + github.com/containerd/log v0.1.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/distribution/reference v0.5.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -62,7 +63,7 @@ require ( github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/cel-go v0.17.7 // indirect + github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gorilla/websocket v1.5.0 // indirect @@ -81,17 +82,20 @@ require ( github.com/stoewer/go-strcase v1.3.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect + go.opentelemetry.io/otel/sdk v1.26.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect - golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect + golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect gopkg.in/warnings.v0 v0.1.2 // indirect + k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // 
indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect ) require ( - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect @@ -132,7 +136,7 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.16.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 @@ -144,7 +148,7 @@ require ( github.com/gobuffalo/flect v1.0.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 @@ -174,30 +178,29 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/vbatts/tar-split v0.11.5 // indirect github.com/vladimirvivien/gexe v0.2.0 // indirect - go.opentelemetry.io/otel v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect - go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/otel v1.26.0 // indirect + go.opentelemetry.io/otel/metric v1.26.0 // indirect + go.opentelemetry.io/otel/trace v1.26.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.23.0 - golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 + golang.org/x/oauth2 v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.20.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.29.1 // indirect - k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect - k8s.io/klog/v2 v2.110.1 - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/component-base v0.30.0 // indirect + k8s.io/klog/v2 v2.120.1 + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 258458178..3ed062774 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= @@ -96,8 +96,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -125,8 +125,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crossplane/crossplane-runtime v1.17.0-rc.0 h1:v+JZ+94bQhunadP3wM64Mw6OnpPTwmiZRrShZEUQoMI= -github.com/crossplane/crossplane-runtime v1.17.0-rc.0/go.mod h1:Pz2tdGVMF6KDGzHZOkvKro0nKc8EzK0sb/nSA7pH4Dc= +github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240520203451-fc036618ffd8 h1:4OtdWor2ixE1Nk+96//Knf63wRiyKFnxFcJx4iM+WY0= +github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240520203451-fc036618ffd8/go.mod h1:aZ1ODIvtOPFFVZ9oo3qVH/MQHuim24qWg2Tj6n+AIf4= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= @@ -172,8 +172,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= -github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod 
h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= @@ -194,9 +194,7 @@ github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMj github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -235,12 +233,12 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= -github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -261,11 +259,10 @@ github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230919002926- github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa h1:+MG+Q2Q7mtW6kCIbUPZ9ZMrj7xOWDKI1hhy1qp0ygI0= github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa/go.mod h1:KdL98/Va8Dy1irB6lTxIRIQ7bQj4lbrlvqUzKEQ+ZBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof 
v0.0.0-20240117000934-35fc243c5815 h1:WzfWbQz/Ze8v6l++GGbGNFZnUShVpP/0xffCPLL+ax8= -github.com/google/pprof v0.0.0-20240117000934-35fc243c5815/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240422182052-72c8669ad3e7 h1:3q13T5NW3mlTJZM6B5UAsf2N5NYFbYWIyI3W8DlvBDU= +github.com/google/pprof v0.0.0-20240422182052-72c8669ad3e7/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -279,8 +276,8 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= @@ -308,7 +305,6 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -354,10 +350,10 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= 
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= @@ -414,8 +410,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/upbound/up-sdk-go v0.1.1-0.20240122203953-2d00664aab8e h1:aNzUuv4ZKH2OT3Qv6dpZxkMPDOfl/6MoS79T/zUzako= github.com/upbound/up-sdk-go v0.1.1-0.20240122203953-2d00664aab8e/go.mod h1:IDIbYDb9fbedtxCc2CrdGcVRol6la7z2gkKh0VYWVGk= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= @@ -438,32 +434,32 @@ go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= +go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= +go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod 
h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= +go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= +go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= +go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= +go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= +go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= +go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -474,11 +470,11 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto 
v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= -golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -488,8 +484,8 @@ golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -506,11 +502,11 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -519,8 +515,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -544,8 +540,8 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -553,8 +549,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -574,15 +570,14 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -596,17 +591,17 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= google.golang.org/grpc/cmd/protoc-gen-go-grpc 
v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -644,39 +639,38 @@ gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= -k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= -k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/apiserver v0.29.1 h1:e2wwHUfEmMsa8+cuft8MT56+16EONIEK8A/gpBSco+g= -k8s.io/apiserver v0.29.1/go.mod h1:V0EpkTRrJymyVT3M49we8uh2RvXf7fWC5XLB0P3SwRw= +k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= +k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= +k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs= +k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y= +k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= +k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.0 h1:QCec+U72tMQ+9tR6A0sMBB5Vh6ImCEkoKkTDRABWq6M= +k8s.io/apiserver v0.30.0/go.mod h1:smOIBq8t0MbKZi7O7SyIpjPsiKJ8qa+llcFCluKyqiY= k8s.io/cli-runtime v0.29.1 h1:By3WVOlEWYfyxhGko0f/IuAOLQcbBSMzwSaDren2JUs= k8s.io/cli-runtime v0.29.1/go.mod h1:vjEY9slFp8j8UoMhV5AlO8uulX9xk6ogfIesHobyBDU= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= -k8s.io/code-generator v0.29.1 h1:8ba8BdtSmAVHgAMpzThb/fuyQeTRtN7NtN7VjMcDLew= -k8s.io/code-generator v0.29.1/go.mod h1:FwFi3C9jCrmbPjekhaCYcYG1n07CYiW1+PAPCockaos= -k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw= -k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc= -k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= -k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= +k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= +k8s.io/code-generator v0.30.0 h1:3VUVqHvWFSVSm9kqL/G6kD4ZwNdHF6J/jPyo3Jgjy3k= +k8s.io/code-generator v0.30.0/go.mod h1:mBMZhfRR4IunJUh2+7LVmdcWwpouCH5+LNPkZ3t/v7Q= +k8s.io/component-base v0.30.0 h1:cj6bp38g0ainlfYtaOQuRELh5KSYjhKxM+io7AUIk4o= 
+k8s.io/component-base v0.30.0/go.mod h1:V9x/0ePFNaKeKYA3bOvIbrNoluTSG+fSJKjLdjOoeXQ= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kubectl v0.29.1 h1:rWnW3hi/rEUvvg7jp4iYB68qW5un/urKbv7fu3Vj0/s= k8s.io/kubectl v0.29.1/go.mod h1:SZzvLqtuOJYSvZzPZR9weSuP0wDQ+N37CENJf0FhDF4= k8s.io/metrics v0.29.1 h1:qutc3aIPMCniMuEApuLaeYX47rdCn8eycVDx7R6wMlQ= k8s.io/metrics v0.29.1/go.mod h1:JrbV2U71+v7d/9qb90UVKL8r0uJ6Z2Hy4V7mDm05cKs= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= -sigs.k8s.io/controller-runtime v0.17.0 h1:fjJQf8Ukya+VjogLO6/bNX9HE6Y2xpsO5+fyS26ur/s= -sigs.k8s.io/controller-runtime v0.17.0/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= +sigs.k8s.io/controller-runtime v0.18.2 h1:RqVW6Kpeaji67CY5nPEfRz6ZfFMk0lWQlNrLqlNpx+Q= +sigs.k8s.io/controller-runtime v0.18.2/go.mod h1:tuAt1+wbVsXIT8lPtk5RURxqAnq7xkpv2Mhttslg7Hw= sigs.k8s.io/controller-tools v0.14.0 h1:rnNoCC5wSXlrNoBKKzL70LNJKIQKEzT6lloG6/LF73A= sigs.k8s.io/controller-tools v0.14.0/go.mod h1:TV7uOtNNnnR72SpzhStvPkoS/U5ir0nMudrkrC4M9Sc= sigs.k8s.io/e2e-framework v0.3.0 h1:eqQALBtPCth8+ulTs6lcPK7ytV5rZSSHJzQHZph4O7U= @@ -691,7 +685,6 @@ sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index 2d5b69304..e2d634a55 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -736,13 +736,9 @@ func getComposerResourcesNames(cds []ComposedResource) []string { // related) XRs when a new CompositionRevision is created. This speeds up // reconciliation of XRs on changes to the Composition by not having to wait for // the 60s sync period, but be instant. 
-func EnqueueForCompositionRevisionFunc(of resource.CompositeKind, list func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error, log logging.Logger) func(ctx context.Context, createEvent runtimeevent.CreateEvent, q workqueue.RateLimitingInterface) { - return func(ctx context.Context, createEvent runtimeevent.CreateEvent, q workqueue.RateLimitingInterface) { - rev, ok := createEvent.Object.(*v1.CompositionRevision) - if !ok { - // should not happen - return - } +func EnqueueForCompositionRevisionFunc(of resource.CompositeKind, list func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error, log logging.Logger) func(ctx context.Context, createEvent runtimeevent.TypedCreateEvent[*v1.CompositionRevision], q workqueue.RateLimitingInterface) { + return func(ctx context.Context, createEvent runtimeevent.TypedCreateEvent[*v1.CompositionRevision], q workqueue.RateLimitingInterface) { + rev := createEvent.Object // get all XRs xrs := kunstructured.UnstructuredList{} diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index 5c42c3715..fbefe27eb 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ b/internal/controller/apiextensions/composite/reconciler_test.go @@ -848,7 +848,7 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { type args struct { of schema.GroupVersionKind list func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error - event runtimeevent.CreateEvent + event runtimeevent.TypedCreateEvent[*v1.CompositionRevision] } type want struct { added []interface{} @@ -879,6 +879,16 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { } return nil }, + event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, }, }, { @@ -897,7 +907,7 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { return nil }, - event: runtimeevent.CreateEvent{ + event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ Object: &v1.CompositionRevision{ ObjectMeta: metav1.ObjectMeta{ Name: "dachshund-sadfa8", @@ -931,7 +941,7 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { return nil }, - event: runtimeevent.CreateEvent{ + event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ Object: &v1.CompositionRevision{ ObjectMeta: metav1.ObjectMeta{ Name: "dachshund-sadfa8", @@ -960,7 +970,7 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { return nil }, - event: runtimeevent.CreateEvent{ + event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ Object: &v1.CompositionRevision{ ObjectMeta: metav1.ObjectMeta{ Name: "dachshund-sadfa8", @@ -1005,7 +1015,7 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { return nil }, - event: runtimeevent.CreateEvent{ + event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ Object: &v1.CompositionRevision{ ObjectMeta: metav1.ObjectMeta{ Name: "dachshund-sadfa8", diff --git a/internal/controller/apiextensions/definition/composed.go b/internal/controller/apiextensions/definition/composed.go index 9084529b9..a09ff7769 100644 --- a/internal/controller/apiextensions/definition/composed.go +++ b/internal/controller/apiextensions/definition/composed.go @@ -52,6 +52,9 @@ type composedResourceInformers struct { log logging.Logger cluster 
cluster.Cluster + handler handler.EventHandler + ps []predicate.Predicate + gvkRoutedCache *engine.GVKRoutedCache lock sync.RWMutex // everything below is protected by this lock @@ -78,18 +81,18 @@ var _ source.Source = &composedResourceInformers{} // ctx is done. // Note that Start can be called multiple times to deliver events to multiple // (composite resource) controllers. -func (i *composedResourceInformers) Start(ctx context.Context, h handler.EventHandler, q workqueue.RateLimitingInterface, ps ...predicate.Predicate) error { +func (i *composedResourceInformers) Start(ctx context.Context, q workqueue.RateLimitingInterface) error { id := uuid.New().String() i.lock.Lock() defer i.lock.Unlock() i.sinks[id] = func(ev runtimeevent.UpdateEvent) { - for _, p := range ps { + for _, p := range i.ps { if !p.Update(ev) { return } } - h.Update(ctx, ev, q) + i.handler.Update(ctx, ev, q) } go func() { @@ -195,6 +198,7 @@ func (i *composedResourceInformers) WatchComposedResources(gvks ...schema.GroupV _ = ca.Start(ctx) }() + // TODO(negz): We should take a write lock before writing to this map. i.cdCaches[gvk] = cdCache{ cache: ca, cancelFn: cancelFn, @@ -266,6 +270,8 @@ func (i *composedResourceInformers) cleanupComposedResourceInformers(ctx context inf.cancelFn() i.gvkRoutedCache.RemoveDelegate(gvk) i.log.Info("Stopped composed resource watch", "gvk", gvk.String()) + + // TODO(negz): We should take a write lock before writing to this map. delete(i.cdCaches, gvk) } } diff --git a/internal/controller/apiextensions/definition/indexes.go b/internal/controller/apiextensions/definition/indexes.go index 94074885f..6aaca2cd8 100644 --- a/internal/controller/apiextensions/definition/indexes.go +++ b/internal/controller/apiextensions/definition/indexes.go @@ -17,19 +17,12 @@ limitations under the License. package definition import ( - "context" "fmt" kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" ) @@ -86,22 +79,25 @@ func refKey(ns, name, kind, apiVersion string) string { return fmt.Sprintf("%s.%s.%s.%s", name, ns, kind, apiVersion) } -func enqueueXRsForMR(ca cache.Cache, xrGVK schema.GroupVersionKind, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { - return func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { - mrGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() - key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), mrGVK.Kind, mrGVK.GroupVersion().String()) +// TODO(negz): Figure out a way to plumb this with controller-runtime v0.18.x +// style sources. 
- composites := kunstructured.UnstructuredList{} - composites.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) - if err := ca.List(ctx, &composites, client.MatchingFields{compositeResourcesRefsIndex: key}); err != nil { - log.Debug("cannot list composite resources related to a MR change", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourcesRefsIndex+"="+key) - return - } - - // queue those composites for reconciliation - for _, xr := range composites.Items { - log.Info("Enqueueing composite resource because managed resource changed", "name", xr.GetName(), "mrGVK", mrGVK.String(), "mrName", ev.ObjectNew.GetName()) - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: xr.GetName()}}) - } - } -} +// func enqueueXRsForMR(ca cache.Cache, xrGVK schema.GroupVersionKind, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { +// return func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { +// mrGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() +// key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), mrGVK.Kind, mrGVK.GroupVersion().String()) +// +// composites := kunstructured.UnstructuredList{} +// composites.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) +// if err := ca.List(ctx, &composites, client.MatchingFields{compositeResourcesRefsIndex: key}); err != nil { +// log.Debug("cannot list composite resources related to a MR change", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourcesRefsIndex+"="+key) +// return +// } +// +// // queue those composites for reconciliation +// for _, xr := range composites.Items { +// log.Info("Enqueueing composite resource because managed resource changed", "name", xr.GetName(), "mrGVK", mrGVK.String(), "mrName", ev.ObjectNew.GetName()) +// q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: xr.GetName()}}) +// } +// } +// } diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index aeefdd200..bca9aed00 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -493,18 +493,26 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco watches := []engine.Watch{ engine.WatchFor(u, &handler.EnqueueRequestForObject{}), // enqueue composites whenever a matching CompositionRevision is created - engine.WatchTriggeredBy(source.Kind(r.mgr.GetCache(), &v1.CompositionRevision{}), handler.Funcs{ + engine.WatchTriggeredBy(source.Kind(r.mgr.GetCache(), &v1.CompositionRevision{}, handler.TypedFuncs[*v1.CompositionRevision]{ CreateFunc: composite.EnqueueForCompositionRevisionFunc(ck, r.mgr.GetCache().List, r.log), - }), - } - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - // enqueue XRs that when a relevant MR is updated - watches = append(watches, engine.WatchTriggeredBy(&r.xrInformers, handler.Funcs{ - UpdateFunc: func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { - enqueueXRsForMR(ca, xrGVK, log)(ctx, ev, q) - }, - })) - } + })), + } + + // TODO(negz): I can't find a great way to plumb this. We now need to pass + // the handler when creating the source (i.e. the xrInformers). The + // xrInformers is designed to handle multiple types, though. 
Given I plan to + // try refactor realtime compositions to make use of new controller-runtime + // functionality around stopping informers, I'm going to just comment it out + // rather than spend time getting it working. + + // if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { + // // enqueue XRs that when a relevant MR is updated + // watches = append(watches, engine.WatchTriggeredBy(&r.xrInformers, handler.TypedFuncs[*kunstructured.Unstructured]{ + // UpdateFunc: func(ctx context.Context, ev runtimeevent.UpdateEvent[*kunstructured.Unstructured], q workqueue.RateLimitingInterface) { + // enqueueXRsForMR(ca, xrGVK, log)(ctx, ev, q) + // }, + // })) + // } c, err := r.composite.Create(name, ko, watches...) if err != nil { diff --git a/internal/controller/engine/engine.go b/internal/controller/engine/engine.go index 4707d9576..552aeb1a4 100644 --- a/internal/controller/engine/engine.go +++ b/internal/controller/engine/engine.go @@ -153,10 +153,10 @@ func (e *ControllerEngine) done(name string, err error) { // Watch an object. type Watch struct { - // one of the two: - kind client.Object + // A watch is either a customSource, or a kind, handler, and predicates. customSource source.Source + kind client.Object handler handler.EventHandler predicates []predicate.Predicate } @@ -172,8 +172,8 @@ func WatchFor(kind client.Object, h handler.EventHandler, p ...predicate.Predica // the controller. source.Kind can be used to create a source for a secondary // cache. Events will be handled by the supplied EventHandler, and may be // filtered by the supplied predicates. -func WatchTriggeredBy(source source.Source, h handler.EventHandler, p ...predicate.Predicate) Watch { - return Watch{customSource: source, handler: h, predicates: p} +func WatchTriggeredBy(source source.Source) Watch { + return Watch{customSource: source} } // Start the named controller. 
Each controller is started with its own cache @@ -236,7 +236,7 @@ func (e *ControllerEngine) Create(name string, o controller.Options, w ...Watch) for _, wt := range w { if wt.customSource != nil { - if err := ctrl.Watch(wt.customSource, wt.handler, wt.predicates...); err != nil { + if err := ctrl.Watch(wt.customSource); err != nil { return nil, errors.Wrap(err, errWatch) } continue @@ -249,7 +249,7 @@ func (e *ControllerEngine) Create(name string, o controller.Options, w ...Watch) } rc.AddDelegate(gvk, ca) - if err := ctrl.Watch(source.Kind(ca, wt.kind), wt.handler, wt.predicates...); err != nil { + if err := ctrl.Watch(source.Kind(ca, wt.kind, wt.handler, wt.predicates...)); err != nil { return nil, errors.Wrap(err, errWatch) } } diff --git a/internal/controller/engine/engine_test.go b/internal/controller/engine/engine_test.go index f68eb8f92..540ddee6e 100644 --- a/internal/controller/engine/engine_test.go +++ b/internal/controller/engine/engine_test.go @@ -27,9 +27,7 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -55,15 +53,15 @@ type MockController struct { controller.Controller MockStart func(stop context.Context) error - MockWatch func(s source.Source, h handler.EventHandler, p ...predicate.Predicate) error + MockWatch func(s source.Source) error } func (c *MockController) Start(stop context.Context) error { return c.MockStart(stop) } -func (c *MockController) Watch(s source.Source, h handler.EventHandler, p ...predicate.Predicate) error { - return c.MockWatch(s, h, p...) +func (c *MockController) Watch(s source.Source) error { + return c.MockWatch(s) } func TestEngine(t *testing.T) { @@ -122,7 +120,7 @@ func TestEngine(t *testing.T) { }, WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }), WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { - c := &MockController{MockWatch: func(source.Source, handler.EventHandler, ...predicate.Predicate) error { return errBoom }} + c := &MockController{MockWatch: func(source.Source) error { return errBoom }} return c, nil }), ), @@ -145,7 +143,7 @@ func TestEngine(t *testing.T) { }, WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }), WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { - c := &MockController{MockWatch: func(source.Source, handler.EventHandler, ...predicate.Predicate) error { return errBoom }} + c := &MockController{MockWatch: func(source.Source) error { return errBoom }} return c, nil }), ), From 05bd8bd153618cb68a6ddd9f9e8ddea9e477daa2 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 5 May 2024 16:29:10 -0700 Subject: [PATCH 205/370] Linter updates due to bumping go.mod to Go v1.22 Updating our controller-runtime and Kubernetes dependencies bumped our minimum Go version to v1.22. That in turn enables some new linters, since we no longer need to copy range vars in Go v1.22. 
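For context, the pattern being removed throughout looks roughly like this
(an illustrative sketch of the affected loops, not any single call site):

    // Before Go v1.22 the loop reused one variable across iterations, so
    // taking &r required pinning a per-iteration copy first.
    for i, r := range p.Items {
        r := r // Pin range variable so we can take its address.
        prs[i] = &r
    }

    // From Go v1.22 each iteration declares a fresh r, so &r is safe as-is.
    for i, r := range p.Items {
        prs[i] = &r
    }
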
Signed-off-by: Nic Cope --- .golangci.yml | 13 ++++++++++--- apis/pkg/v1/interfaces.go | 2 -- apis/pkg/v1beta1/function_interfaces.go | 1 - apis/pkg/v1beta1/lock.go | 2 -- cmd/crank/beta/render/render.go | 1 - cmd/crank/xpkg/push.go | 1 - .../composite/composition_functions.go | 1 - .../apiextensions/composite/composition_pt.go | 2 +- .../apiextensions/composite/environment_selector.go | 2 +- .../controller/apiextensions/definition/composed.go | 2 -- .../controller/apiextensions/usage/reconciler.go | 4 ++-- internal/controller/pkg/revision/establisher.go | 3 --- internal/controller/rbac/definition/reconciler.go | 1 - .../controller/rbac/provider/roles/reconciler.go | 1 - internal/dag/dag_test.go | 2 -- internal/dag/fuzz_test.go | 2 -- internal/names/generate.go | 2 +- .../apiextensions/v1/composition/handler.go | 1 - internal/xpkg/fuzz_test.go | 2 +- internal/xpkg/scheme.go | 1 - .../apiextensions/v1/composition/patches.go | 1 - .../apiextensions/v1/composition/validator_test.go | 1 - test/e2e/funcs/feature.go | 1 - 23 files changed, 16 insertions(+), 33 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e0e797139..7e46dbe35 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -103,9 +103,9 @@ linters: # to communicate what the bool means. - nonamedreturns - # TODO(negz): We do want these as of Go v1.22. - - copyloopvar - - intrange + # Warns about taking the address of a range variable. This isn't an issue in + # Go v1.22 and above: https://tip.golang.org/doc/go1.22 + - exportloopref linters-settings: errcheck: @@ -287,6 +287,13 @@ issues: linters: - gosec - gas + + # This is about implicit memory aliasing in a range loop. + # This is a false positive with Go v1.22 and above. + - text: "G601:" + linters: + - gosec + - gas # Some k8s dependencies do not have JSON tags on all fields in structs. - path: k8s.io/ diff --git a/apis/pkg/v1/interfaces.go b/apis/pkg/v1/interfaces.go index 2b9500084..bcb00885e 100644 --- a/apis/pkg/v1/interfaces.go +++ b/apis/pkg/v1/interfaces.go @@ -726,7 +726,6 @@ type PackageRevisionList interface { func (p *ProviderRevisionList) GetRevisions() []PackageRevision { prs := make([]PackageRevision, len(p.Items)) for i, r := range p.Items { - r := r // Pin range variable so we can take its address. prs[i] = &r } return prs @@ -736,7 +735,6 @@ func (p *ProviderRevisionList) GetRevisions() []PackageRevision { func (p *ConfigurationRevisionList) GetRevisions() []PackageRevision { prs := make([]PackageRevision, len(p.Items)) for i, r := range p.Items { - r := r // Pin range variable so we can take its address. prs[i] = &r } return prs diff --git a/apis/pkg/v1beta1/function_interfaces.go b/apis/pkg/v1beta1/function_interfaces.go index 4166f4f16..7e99faf1c 100644 --- a/apis/pkg/v1beta1/function_interfaces.go +++ b/apis/pkg/v1beta1/function_interfaces.go @@ -328,7 +328,6 @@ func (r *FunctionRevision) SetCommonLabels(l map[string]string) { func (p *FunctionRevisionList) GetRevisions() []v1.PackageRevision { prs := make([]v1.PackageRevision, len(p.Items)) for i, r := range p.Items { - r := r // Pin range variable so we can take its address. prs[i] = &r } return prs diff --git a/apis/pkg/v1beta1/lock.go b/apis/pkg/v1beta1/lock.go index 955d765a8..c6c102146 100644 --- a/apis/pkg/v1beta1/lock.go +++ b/apis/pkg/v1beta1/lock.go @@ -60,7 +60,6 @@ type LockPackage struct { func ToNodes(pkgs ...LockPackage) []dag.Node { nodes := make([]dag.Node, len(pkgs)) for i, r := range pkgs { - r := r // Pin range variable so we can take its address. 
nodes[i] = &r } return nodes @@ -75,7 +74,6 @@ func (l *LockPackage) Identifier() string { func (l *LockPackage) Neighbors() []dag.Node { nodes := make([]dag.Node, len(l.Dependencies)) for i, r := range l.Dependencies { - r := r // Pin range variable so we can take its address. nodes[i] = &r } return nodes diff --git a/cmd/crank/beta/render/render.go b/cmd/crank/beta/render/render.go index 37268302e..c03e4d581 100644 --- a/cmd/crank/beta/render/render.go +++ b/cmd/crank/beta/render/render.go @@ -335,7 +335,6 @@ func filterExtraResources(ers []unstructured.Unstructured, selector *fnv1beta1.R } out := &fnv1beta1.Resources{} for _, er := range ers { - er := er if selector.GetApiVersion() != er.GetAPIVersion() { continue } diff --git a/cmd/crank/xpkg/push.go b/cmd/crank/xpkg/push.go index a97832f40..1aceab488 100644 --- a/cmd/crank/xpkg/push.go +++ b/cmd/crank/xpkg/push.go @@ -152,7 +152,6 @@ func (c *pushCmd) Run(logger logging.Logger) error { //nolint:gocognit // This f adds := make([]mutate.IndexAddendum, len(c.PackageFiles)) g, ctx := errgroup.WithContext(context.Background()) for i, file := range c.PackageFiles { - i, file := i, file // Pin range variables for use in goroutine g.Go(func() error { img, err := tarball.ImageFromPath(filepath.Clean(file), nil) if err != nil { diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index a126dac18..31fb9e923 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -651,7 +651,6 @@ func (e *ExistingExtraResourcesFetcher) Fetch(ctx context.Context, rs *v1beta1.R resources := make([]*v1beta1.Resource, len(list.Items)) for i, r := range list.Items { - r := r o, err := AsStruct(&r) if err != nil { return nil, errors.Wrap(err, errExtraResourceAsStruct) diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index 023afa2fa..561a2b7f1 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -416,7 +416,7 @@ func AssociateByOrder(t []v1.ComposedTemplate, r []corev1.ObjectReference) []Tem j = len(r) } - for i := 0; i < j; i++ { + for i := range j { a[i].Reference = r[i] } diff --git a/internal/controller/apiextensions/composite/environment_selector.go b/internal/controller/apiextensions/composite/environment_selector.go index 0eff504e6..f56bd8898 100644 --- a/internal/controller/apiextensions/composite/environment_selector.go +++ b/internal/controller/apiextensions/composite/environment_selector.go @@ -191,7 +191,7 @@ func sortConfigs(ec []v1alpha1.EnvironmentConfig, f string) error { }, len(ec)) var valsKind reflect.Kind - for i := 0; i < len(ec); i++ { + for i := range len(ec) { m, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&ec[i]) if err != nil { return err diff --git a/internal/controller/apiextensions/definition/composed.go b/internal/controller/apiextensions/definition/composed.go index a09ff7769..bb87b88b5 100644 --- a/internal/controller/apiextensions/definition/composed.go +++ b/internal/controller/apiextensions/definition/composed.go @@ -239,8 +239,6 @@ func (i *composedResourceInformers) cleanupComposedResourceInformers(ctx context // fast enough for now. It's all in-memory. 
referenced := make(map[schema.GroupVersionKind]bool) for _, crd := range crds.Items { - crd := crd - if !xcrd.IsEstablished(crd.Status) { continue } diff --git a/internal/controller/apiextensions/usage/reconciler.go b/internal/controller/apiextensions/usage/reconciler.go index 8ed3fc670..8aec1e898 100644 --- a/internal/controller/apiextensions/usage/reconciler.go +++ b/internal/controller/apiextensions/usage/reconciler.go @@ -320,14 +320,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco if u.Spec.ReplayDeletion != nil && *u.Spec.ReplayDeletion && used.GetAnnotations() != nil { if policy, ok := used.GetAnnotations()[usage.AnnotationKeyDeletionAttempt]; ok { // We have already recorded a deletion attempt and want to replay deletion, let's delete the used resource. - //nolint:contextcheck // See comment on Delete below. + + //nolint:contextcheck // We cannot use the context from the reconcile function since it will be cancelled after the reconciliation. go func() { // We do the deletion async and after some delay to make sure the usage is deleted before the // deletion attempt. We remove the finalizer on this Usage right below, so, we know it will disappear // very soon. time.Sleep(2 * time.Second) log.Info("Replaying deletion of the used resource", "apiVersion", used.GetAPIVersion(), "kind", used.GetKind(), "name", used.GetName(), "policy", policy) - // We cannot use the context from the reconcile function since it will be cancelled after the reconciliation. if err = r.client.Delete(context.Background(), used, client.PropagationPolicy(policy)); err != nil { log.Info("Error when replaying deletion of the used resource", "apiVersion", used.GetAPIVersion(), "kind", used.GetKind(), "name", used.GetName(), "err", err) } diff --git a/internal/controller/pkg/revision/establisher.go b/internal/controller/pkg/revision/establisher.go index cac651785..bd5f1471f 100644 --- a/internal/controller/pkg/revision/establisher.go +++ b/internal/controller/pkg/revision/establisher.go @@ -145,7 +145,6 @@ func (e *APIEstablisher) ReleaseObjects(ctx context.Context, parent v1.PackageRe g, ctx := errgroup.WithContext(ctx) g.SetLimit(maxConcurrentEstablishers) for _, ref := range allObjs { - ref := ref // Pin the loop variable. g.Go(func() error { select { case <-ctx.Done(): @@ -235,7 +234,6 @@ func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, pa g.SetLimit(maxConcurrentEstablishers) out := make(chan currentDesired, len(objs)) for _, res := range objs { - res := res // Pin the range variable before using it in a Goroutine. g.Go(func() error { // Assert desired object to resource.Object so that we can access its // metadata. @@ -393,7 +391,6 @@ func (e *APIEstablisher) establish(ctx context.Context, allObjs []currentDesired g.SetLimit(maxConcurrentEstablishers) out := make(chan xpv1.TypedReference, len(allObjs)) for _, cd := range allObjs { - cd := cd // Pin the loop variable. g.Go(func() error { if !cd.Exists { // Only create a missing resource if we are going to control it. 
diff --git a/internal/controller/rbac/definition/reconciler.go b/internal/controller/rbac/definition/reconciler.go index 6667bc0a6..bc4263aef 100644 --- a/internal/controller/rbac/definition/reconciler.go +++ b/internal/controller/rbac/definition/reconciler.go @@ -181,7 +181,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco applied := make([]string, 0) for _, cr := range r.rbac.RenderClusterRoles(d) { - cr := cr // Pin range variable so we can take its address. log := log.WithValues("role-name", cr.GetName()) origRV := "" err := r.client.Apply(ctx, &cr, diff --git a/internal/controller/rbac/provider/roles/reconciler.go b/internal/controller/rbac/provider/roles/reconciler.go index 2970620fd..df35d3e91 100644 --- a/internal/controller/rbac/provider/roles/reconciler.go +++ b/internal/controller/rbac/provider/roles/reconciler.go @@ -333,7 +333,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco applied := make([]string, 0) for _, cr := range r.rbac.RenderClusterRoles(pr, resources) { - cr := cr // Pin range variable so we can take its address. log := log.WithValues("role-name", cr.GetName()) origRV := "" err := r.client.Apply(ctx, &cr, diff --git a/internal/dag/dag_test.go b/internal/dag/dag_test.go index 787226166..8da0a2fb0 100644 --- a/internal/dag/dag_test.go +++ b/internal/dag/dag_test.go @@ -37,7 +37,6 @@ func (s *simpleNode) Neighbors() []Node { nodes := make([]Node, len(s.neighbors)) i := 0 for _, r := range s.neighbors { - r := r // Pin range variable so we can take its address. nodes[i] = &r i++ } @@ -58,7 +57,6 @@ func (s *simpleNode) AddNeighbors(nodes ...Node) error { func toNodes(n []simpleNode) []Node { nodes := make([]Node, len(n)) for i, r := range n { - r := r // Pin range variable so we can take its address. nodes[i] = &r } return nodes diff --git a/internal/dag/fuzz_test.go b/internal/dag/fuzz_test.go index 0d492c5a3..a05fbc21e 100644 --- a/internal/dag/fuzz_test.go +++ b/internal/dag/fuzz_test.go @@ -31,7 +31,6 @@ type SimpleFuzzNode struct { func toNodesFuzz(n []SimpleFuzzNode) []Node { nodes := make([]Node, len(n)) for i, r := range n { - r := r // Pin range variable so we can take its address. nodes[i] = &r } return nodes @@ -59,7 +58,6 @@ func (s *SimpleFuzzNode) Neighbors() []Node { nodes := make([]Node, len(s.NeighborsField)) i := 0 for _, r := range s.NeighborsField { - r := r // Pin range variable so we can take its address. nodes[i] = &r i++ } diff --git a/internal/names/generate.go b/internal/names/generate.go index 721511ff9..33cf591ab 100644 --- a/internal/names/generate.go +++ b/internal/names/generate.go @@ -84,7 +84,7 @@ func (r *nameGenerator) GenerateName(ctx context.Context, cd resource.Object) er // locally. To reduce that risk even further the caller must employ a // conflict recovery mechanism. 
maxTries := 10 - for i := 0; i < maxTries; i++ { + for range maxTries { name := r.namer.GenerateName(cd.GetGenerateName()) obj := composite.Unstructured{} obj.SetGroupVersionKind(cd.GetObjectKind().GroupVersionKind()) diff --git a/internal/validation/apiextensions/v1/composition/handler.go b/internal/validation/apiextensions/v1/composition/handler.go index 66984b7a0..21dc9328c 100644 --- a/internal/validation/apiextensions/v1/composition/handler.go +++ b/internal/validation/apiextensions/v1/composition/handler.go @@ -189,7 +189,6 @@ func (v *validator) getNeededCRDs(ctx context.Context, comp *v1.Composition) (ma // Get schema for all Managed Resource Definitions defined by // comp.Spec.Resources. for _, res := range comp.Spec.Resources { - res := res gvk, err := composition.GetBaseObjectGVK(&res) if err != nil { return nil, []error{err} diff --git a/internal/xpkg/fuzz_test.go b/internal/xpkg/fuzz_test.go index ce989c349..359f08887 100644 --- a/internal/xpkg/fuzz_test.go +++ b/internal/xpkg/fuzz_test.go @@ -38,7 +38,7 @@ func FuzzFindXpkgInDir(f *testing.F) { fs.Remove(createdFile) } }() - for i := 0; i < noOfFiles%500; i++ { + for range noOfFiles % 500 { fname, err := ff.GetString() if err != nil { t.Skip() diff --git a/internal/xpkg/scheme.go b/internal/xpkg/scheme.go index c046b45d2..cb7efc73f 100644 --- a/internal/xpkg/scheme.go +++ b/internal/xpkg/scheme.go @@ -77,7 +77,6 @@ func TryConvert(obj runtime.Object, candidates ...conversion.Hub) (runtime.Objec } for _, c := range candidates { - c := c if err := cvt.ConvertTo(c); err == nil { return c, true } diff --git a/pkg/validation/apiextensions/v1/composition/patches.go b/pkg/validation/apiextensions/v1/composition/patches.go index 6768096c7..76a918126 100644 --- a/pkg/validation/apiextensions/v1/composition/patches.go +++ b/pkg/validation/apiextensions/v1/composition/patches.go @@ -327,7 +327,6 @@ func validateTransformsChainIOTypes(transforms []v1.Transform, fromType xpschema return "", field.InternalError(field.NewPath("transforms"), err) } for i, transform := range transforms { - transform := transform err := IsValidInputForTransform(&transform, inputType) if err != nil && inputType != "" { return "", field.Invalid(field.NewPath("transforms").Index(i), transform, err.Error()) diff --git a/pkg/validation/apiextensions/v1/composition/validator_test.go b/pkg/validation/apiextensions/v1/composition/validator_test.go index c8723b3a2..84454e8cd 100644 --- a/pkg/validation/apiextensions/v1/composition/validator_test.go +++ b/pkg/validation/apiextensions/v1/composition/validator_test.go @@ -714,7 +714,6 @@ func defaultGKToCRDs() map[schema.GroupKind]apiextensions.CustomResourceDefiniti crds := []apiextensions.CustomResourceDefinition{*defaultManagedCrdBuilder().build(), *defaultCompositeCrdBuilder().build()} m := make(map[schema.GroupKind]apiextensions.CustomResourceDefinition, len(crds)) for _, crd := range crds { - crd := crd m[schema.GroupKind{ Group: crd.Spec.Group, Kind: crd.Spec.Names.Kind, diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go index ab96525ad..c84c9748e 100644 --- a/test/e2e/funcs/feature.go +++ b/test/e2e/funcs/feature.go @@ -1147,7 +1147,6 @@ func valueOrError(s string, err error) string { func itemsToObjects(items []unstructured.Unstructured) []client.Object { objects := make([]client.Object, len(items)) for i, item := range items { - item := item // unalias loop variable objects[i] = &item } return objects From 355ba7a44b4b23240357fd3e3447d2d3239ffd44 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Sun, 
5 May 2024 23:04:59 -0700 Subject: [PATCH 206/370] Workaround malformed generated CRD Signed-off-by: Nic Cope --- Makefile | 11 ++++++++++- ...kg.crossplane.io_deploymentruntimeconfigs.yaml | 15 +++++++++++++++ ...kg.crossplane.io_deploymentruntimeconfigs.yaml | 4 ++++ 3 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 cluster/crd-patches/pkg.crossplane.io_deploymentruntimeconfigs.yaml diff --git a/Makefile b/Makefile index dafd154e6..b9595a1e7 100644 --- a/Makefile +++ b/Makefile @@ -81,6 +81,15 @@ fallthrough: submodules @make CRD_DIR = cluster/crds +CRD_PATCH_DIR = cluster/crd-patches + +# See patch files for details. +crds.patch: $(KUBECTL) + @$(INFO) patching generated CRDs + @mkdir $(WORK_DIR)/patch + @$(KUBECTL) patch --local --type=json -f $(CRD_DIR)/pkg.crossplane.io_deploymentruntimeconfigs.yaml --patch-file $(CRD_PATCH_DIR)/pkg.crossplane.io_deploymentruntimeconfigs.yaml -o yaml > $(WORK_DIR)/patch/pkg.crossplane.io_deploymentruntimeconfigs.yaml + @mv $(WORK_DIR)/patch/pkg.crossplane.io_deploymentruntimeconfigs.yaml $(CRD_DIR)/pkg.crossplane.io_deploymentruntimeconfigs.yaml + @$(OK) patched generated CRDs crds.clean: @$(INFO) cleaning generated CRDs @@ -93,7 +102,7 @@ generate.run: gen-kustomize-crds gen-chart-license gen-chart-license: @cp -f LICENSE cluster/charts/crossplane/LICENSE -generate.done: crds.clean +generate.done: crds.clean crds.patch gen-kustomize-crds: @$(INFO) Adding all CRDs to Kustomize file for local development diff --git a/cluster/crd-patches/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crd-patches/pkg.crossplane.io_deploymentruntimeconfigs.yaml new file mode 100644 index 000000000..57e3dd1e1 --- /dev/null +++ b/cluster/crd-patches/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -0,0 +1,15 @@ +# For reasons that aren't immediately obvious, updating k8s.io/code-generator +# from v0.29.x to v0.30 triggers a variant of the below issue. As far as I can +# tell, this is the only way to work around it. The below fields are list map +# keys, but aren't required in the generated CRD. +# https://github.com/kubernetes-sigs/controller-tools/issues/444 + +- op: add + path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/deploymentTemplate/properties/spec/properties/template/properties/spec/properties/hostAliases/items/required + value: + - ip + +- op: add + path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/deploymentTemplate/properties/spec/properties/template/properties/spec/properties/imagePullSecrets/items/required + value: + - name \ No newline at end of file diff --git a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml index f876d0e55..35a599ab7 100644 --- a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -4211,6 +4211,8 @@ spec: ip: description: IP address of the host file entry. type: string + required: + - ip type: object type: array x-kubernetes-list-map-keys: @@ -4265,6 +4267,8 @@ spec: More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? 
type: string + required: + - name type: object x-kubernetes-map-type: atomic type: array From 5cfbd675a1d1d3ea1f4d7ecbf8f8fac63c6ced95 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 7 May 2024 10:51:27 -0700 Subject: [PATCH 207/370] Make CompositeReconcilerOptions a method on Reconciler It started as a function, but now we pass several arguments that are all fields of the Reconciler. It's only called once, by the Reconciler. Making it a method shortens the function signature, and makes it clear which things change on each reconcile and which are fixed. Signed-off-by: Nic Cope --- .../apiextensions/definition/reconciler.go | 59 +++++++++---------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index bca9aed00..bfd72a3ec 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -35,7 +35,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -466,7 +465,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } } - ro := CompositeReconcilerOptions(r.options, d, r.client, r.log, r.record) + ro := r.CompositeReconcilerOptions(d) ck := resource.CompositeKind(d.GetCompositeGroupVersionKind()) if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { ro = append(ro, composite.WithKindObserver(composite.KindObserverFunc(r.xrInformers.WatchComposedResources))) @@ -560,33 +559,33 @@ func (r *Reconciler) stopCompositeController(d *v1.CompositeResourceDefinition) // CompositeReconcilerOptions builds the options for a composite resource // reconciler. The options vary based on the supplied feature flags. -func CompositeReconcilerOptions(co apiextensionscontroller.Options, d *v1.CompositeResourceDefinition, c client.Client, l logging.Logger, e event.Recorder) []composite.ReconcilerOption { +func (r *Reconciler) CompositeReconcilerOptions(d *v1.CompositeResourceDefinition) []composite.ReconcilerOption { // The default set of reconciler options when no feature flags are enabled. 
o := []composite.ReconcilerOption{ - composite.WithConnectionPublishers(composite.NewAPIFilteredSecretPublisher(c, d.GetConnectionSecretKeys())), + composite.WithConnectionPublishers(composite.NewAPIFilteredSecretPublisher(r.client, d.GetConnectionSecretKeys())), composite.WithCompositionSelector(composite.NewCompositionSelectorChain( - composite.NewEnforcedCompositionSelector(*d, e), - composite.NewAPIDefaultCompositionSelector(c, *meta.ReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind), e), - composite.NewAPILabelSelectorResolver(c), + composite.NewEnforcedCompositionSelector(*d, r.record), + composite.NewAPIDefaultCompositionSelector(r.client, *meta.ReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind), r.record), + composite.NewAPILabelSelectorResolver(r.client), )), - composite.WithLogger(l.WithValues("controller", composite.ControllerName(d.GetName()))), - composite.WithRecorder(e.WithAnnotations("controller", composite.ControllerName(d.GetName()))), - composite.WithPollInterval(co.PollInterval), + composite.WithLogger(r.log.WithValues("controller", composite.ControllerName(d.GetName()))), + composite.WithRecorder(r.record.WithAnnotations("controller", composite.ControllerName(d.GetName()))), + composite.WithPollInterval(r.options.PollInterval), } // We only want to enable Composition environment support if the relevant // feature flag is enabled. Otherwise we will default to noop selector and // fetcher that will always return nil. All environment features are // subsequently skipped if the environment is nil. - if co.Features.Enabled(features.EnableAlphaEnvironmentConfigs) { + if r.options.Features.Enabled(features.EnableAlphaEnvironmentConfigs) { o = append(o, - composite.WithEnvironmentSelector(composite.NewAPIEnvironmentSelector(c)), - composite.WithEnvironmentFetcher(composite.NewAPIEnvironmentFetcher(c))) + composite.WithEnvironmentSelector(composite.NewAPIEnvironmentSelector(r.client)), + composite.WithEnvironmentFetcher(composite.NewAPIEnvironmentFetcher(r.client))) } // If external secret stores aren't enabled we just fetch connection details // from Kubernetes secrets. - var fetcher managed.ConnectionDetailsFetcher = composite.NewSecretConnectionDetailsFetcher(c) + var fetcher managed.ConnectionDetailsFetcher = composite.NewSecretConnectionDetailsFetcher(r.client) // We only want to enable ExternalSecretStore support if the relevant // feature flag is enabled. Otherwise, we start the XR reconcilers with @@ -594,48 +593,48 @@ func CompositeReconcilerOptions(co apiextensionscontroller.Options, d *v1.Compos // We also add a new Configurator for ExternalSecretStore which basically // reflects PublishConnectionDetailsWithStoreConfigRef in Composition to // the composite resource. 
- if co.Features.Enabled(features.EnableAlphaExternalSecretStores) { + if r.options.Features.Enabled(features.EnableAlphaExternalSecretStores) { pc := []managed.ConnectionPublisher{ - composite.NewAPIFilteredSecretPublisher(c, d.GetConnectionSecretKeys()), - composite.NewSecretStoreConnectionPublisher(connection.NewDetailsManager(c, v1alpha1.StoreConfigGroupVersionKind, - connection.WithTLSConfig(co.ESSOptions.TLSConfig)), d.GetConnectionSecretKeys()), + composite.NewAPIFilteredSecretPublisher(r.client, d.GetConnectionSecretKeys()), + composite.NewSecretStoreConnectionPublisher(connection.NewDetailsManager(r.client, v1alpha1.StoreConfigGroupVersionKind, + connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), d.GetConnectionSecretKeys()), } // If external secret stores are enabled we need to support fetching // connection details from both secrets and external stores. fetcher = composite.ConnectionDetailsFetcherChain{ - composite.NewSecretConnectionDetailsFetcher(c), - connection.NewDetailsManager(c, v1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(co.ESSOptions.TLSConfig)), + composite.NewSecretConnectionDetailsFetcher(r.client), + connection.NewDetailsManager(r.client, v1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), } cc := composite.NewConfiguratorChain( - composite.NewAPINamingConfigurator(c), - composite.NewAPIConfigurator(c), - composite.NewSecretStoreConnectionDetailsConfigurator(c), + composite.NewAPINamingConfigurator(r.client), + composite.NewAPIConfigurator(r.client), + composite.NewSecretStoreConnectionDetailsConfigurator(r.client), ) o = append(o, composite.WithConnectionPublishers(pc...), composite.WithConfigurator(cc), - composite.WithComposer(composite.NewPTComposer(c, composite.WithComposedConnectionDetailsFetcher(fetcher)))) + composite.WithComposer(composite.NewPTComposer(r.client, composite.WithComposedConnectionDetailsFetcher(fetcher)))) } // If Composition Functions are enabled we use two different Composer // implementations. One supports P&T (aka 'Resources mode') and the other // Functions (aka 'Pipeline mode'). - if co.Features.Enabled(features.EnableBetaCompositionFunctions) { - ptc := composite.NewPTComposer(c, composite.WithComposedConnectionDetailsFetcher(fetcher)) + if r.options.Features.Enabled(features.EnableBetaCompositionFunctions) { + ptc := composite.NewPTComposer(r.client, composite.WithComposedConnectionDetailsFetcher(fetcher)) fcopts := []composite.FunctionComposerOption{ - composite.WithComposedResourceObserver(composite.NewExistingComposedResourceObserver(c, fetcher)), + composite.WithComposedResourceObserver(composite.NewExistingComposedResourceObserver(r.client, fetcher)), composite.WithCompositeConnectionDetailsFetcher(fetcher), } - if co.Features.Enabled(features.EnableBetaCompositionFunctionsExtraResources) { - fcopts = append(fcopts, composite.WithExtraResourcesFetcher(composite.NewExistingExtraResourcesFetcher(c))) + if r.options.Features.Enabled(features.EnableBetaCompositionFunctionsExtraResources) { + fcopts = append(fcopts, composite.WithExtraResourcesFetcher(composite.NewExistingExtraResourcesFetcher(r.client))) } - fc := composite.NewFunctionComposer(c, co.FunctionRunner, fcopts...) + fc := composite.NewFunctionComposer(r.client, r.options.FunctionRunner, fcopts...) // Note that if external secret stores are enabled this will supersede // the WithComposer option specified in that block. 
From e68021de9c2d5344b9da13ba1aed2dd722789786 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 6 May 2024 18:14:03 -0700 Subject: [PATCH 208/370] Re-enable realtime compositions E2E tests Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 390b8bf60..c0f2d0b3a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -247,6 +247,7 @@ jobs: - environment-configs - usage - ssa-claims + - realtime-compositions steps: - name: Setup QEMU From b756bd1eccf46a95d8167a8f5ec5c5a9469e9360 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 7 May 2024 13:12:40 -0700 Subject: [PATCH 209/370] Move realtime composition event handlers into their own file Signed-off-by: Nic Cope --- .../apiextensions/composite/reconciler.go | 48 ---- .../composite/reconciler_test.go | 217 --------------- .../apiextensions/definition/handlers.go | 103 +++++++ .../apiextensions/definition/handlers_test.go | 251 ++++++++++++++++++ .../apiextensions/definition/indexes.go | 23 -- .../apiextensions/definition/reconciler.go | 2 +- 6 files changed, 355 insertions(+), 289 deletions(-) create mode 100644 internal/controller/apiextensions/definition/handlers.go create mode 100644 internal/controller/apiextensions/definition/handlers_test.go diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index e2d634a55..5c5855c1e 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -26,12 +26,8 @@ import ( kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -731,47 +727,3 @@ func getComposerResourcesNames(cds []ComposedResource) []string { } return names } - -// EnqueueForCompositionRevisionFunc returns a function that enqueues (the -// related) XRs when a new CompositionRevision is created. This speeds up -// reconciliation of XRs on changes to the Composition by not having to wait for -// the 60s sync period, but be instant. -func EnqueueForCompositionRevisionFunc(of resource.CompositeKind, list func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error, log logging.Logger) func(ctx context.Context, createEvent runtimeevent.TypedCreateEvent[*v1.CompositionRevision], q workqueue.RateLimitingInterface) { - return func(ctx context.Context, createEvent runtimeevent.TypedCreateEvent[*v1.CompositionRevision], q workqueue.RateLimitingInterface) { - rev := createEvent.Object - - // get all XRs - xrs := kunstructured.UnstructuredList{} - xrs.SetGroupVersionKind(schema.GroupVersionKind(of)) - xrs.SetKind(schema.GroupVersionKind(of).Kind + "List") - if err := list(ctx, &xrs); err != nil { - // logging is most we can do here. This is a programming error if it happens. 
- log.Info("cannot list in CompositionRevision handler", "type", schema.GroupVersionKind(of).String(), "error", err) - return - } - - // enqueue all those that reference the Composition of this revision - compName := rev.Labels[v1.LabelCompositionName] - if compName == "" { - return - } - for _, u := range xrs.Items { - xr := composite.Unstructured{Unstructured: u} - - // only automatic - if pol := xr.GetCompositionUpdatePolicy(); pol != nil && *pol == xpv1.UpdateManual { - continue - } - - // only those that reference the right Composition - if ref := xr.GetCompositionReference(); ref == nil || ref.Name != compName { - continue - } - - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: xr.GetName(), - Namespace: xr.GetNamespace(), - }}) - } - } -} diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index fbefe27eb..8a3f1152d 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ b/internal/controller/apiextensions/composite/reconciler_test.go @@ -18,7 +18,6 @@ package composite import ( "context" - "reflect" "testing" "time" @@ -27,19 +26,14 @@ import ( corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/event" - "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" @@ -843,214 +837,3 @@ func TestFilterToXRPatches(t *testing.T) { }) } } - -func TestEnqueueForCompositionRevisionFunc(t *testing.T) { - type args struct { - of schema.GroupVersionKind - list func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error - event runtimeevent.TypedCreateEvent[*v1.CompositionRevision] - } - type want struct { - added []interface{} - } - - dog := schema.GroupVersionKind{Group: "example.com", Version: "v1", Kind: "Dog"} - dogList := dog.GroupVersion().WithKind("DogList") - - tests := []struct { - name string - args args - want want - }{ - { - name: "empty", - args: args{ - of: dog, - list: func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error { - // test parameters only here, not in the later tests for brevity. 
- u, ok := list.(*kunstructured.UnstructuredList) - if !ok { - t.Errorf("list was not an UnstructuredList") - } else if got := u.GroupVersionKind(); got != dogList { - t.Errorf("list was not a DogList, got: %s", got) - } - if len(opts) != 0 { - t.Errorf("unexpected list options: %#v", opts) - } - return nil - }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - }, - { - name: "automatic management policy", - args: args{ - of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&policy) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil - }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{ - added: []interface{}{reconcile.Request{NamespacedName: types.NamespacedName{ - Namespace: "ns", - Name: "obj1", - }}}, - }, - }, - { - name: "manual management policy", - args: args{ - of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateManual - obj1.SetCompositionUpdatePolicy(&policy) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil - }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{}, - }, - { - name: "other composition", - args: args{ - of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&policy) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil - }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{}, - }, - { - name: "multiple", - args: args{ - of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - automatic := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&automatic) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - obj2 := obj1.DeepCopy() - obj2.SetName("obj2") - - obj3 := obj1.DeepCopy() - obj3.SetName("obj3") - 
obj3.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) - - obj4 := obj1.DeepCopy() - obj4.SetName("obj4") - manual := xpv1.UpdateManual - obj4.SetCompositionUpdatePolicy(&manual) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{ - obj1.Unstructured, - obj2.Unstructured, - obj3.Unstructured, - } - - return nil - }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{ - added: []interface{}{ - reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj1"}}, - reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj2"}}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fn := EnqueueForCompositionRevisionFunc(resource.CompositeKind(tt.args.of), tt.args.list, logging.NewNopLogger()) - q := rateLimitingQueueMock{} - fn(context.TODO(), tt.args.event, &q) - if got := q.added; !reflect.DeepEqual(got, tt.want.added) { - t.Errorf("EnqueueForCompositionRevisionFunc(...)(ctx, event, queue) = %v, want %v", got, tt.want) - } - }) - } -} - -type rateLimitingQueueMock struct { - workqueue.RateLimitingInterface - added []interface{} -} - -func (f *rateLimitingQueueMock) Add(item interface{}) { - f.added = append(f.added, item) -} diff --git a/internal/controller/apiextensions/definition/handlers.go b/internal/controller/apiextensions/definition/handlers.go new file mode 100644 index 000000000..dc70b3680 --- /dev/null +++ b/internal/controller/apiextensions/definition/handlers.go @@ -0,0 +1,103 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package definition + +import ( + "context" + + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + cache "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" +) + +// For comp rev +// EnqueueForCompositionRevisionFunc returns a function that enqueues (the +// related) XRs when a new CompositionRevision is created. This speeds up +// reconciliation of XRs on changes to the Composition by not having to wait for +// the 60s sync period, but be instant. 
+func EnqueueForCompositionRevisionFunc(of resource.CompositeKind, list func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error, log logging.Logger) func(ctx context.Context, createEvent runtimeevent.TypedCreateEvent[*v1.CompositionRevision], q workqueue.RateLimitingInterface) { + return func(ctx context.Context, createEvent runtimeevent.TypedCreateEvent[*v1.CompositionRevision], q workqueue.RateLimitingInterface) { + rev := createEvent.Object + + // get all XRs + xrs := kunstructured.UnstructuredList{} + xrs.SetGroupVersionKind(schema.GroupVersionKind(of)) + xrs.SetKind(schema.GroupVersionKind(of).Kind + "List") + if err := list(ctx, &xrs); err != nil { + // logging is most we can do here. This is a programming error if it happens. + log.Info("cannot list in CompositionRevision handler", "type", schema.GroupVersionKind(of).String(), "error", err) + return + } + + // enqueue all those that reference the Composition of this revision + compName := rev.Labels[v1.LabelCompositionName] + if compName == "" { + return + } + for _, u := range xrs.Items { + xr := composite.Unstructured{Unstructured: u} + + // only automatic + if pol := xr.GetCompositionUpdatePolicy(); pol != nil && *pol == xpv1.UpdateManual { + continue + } + + // only those that reference the right Composition + if ref := xr.GetCompositionReference(); ref == nil || ref.Name != compName { + continue + } + + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: xr.GetName(), + Namespace: xr.GetNamespace(), + }}) + } + } +} + +// TODO(negz): Figure out a way to plumb this with controller-runtime v0.18.x +// style sources. + +func enqueueXRsForMR(ca cache.Cache, xrGVK schema.GroupVersionKind, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { //nolint:unused // See comment above. + return func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { + mrGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() + key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), mrGVK.Kind, mrGVK.GroupVersion().String()) + composites := kunstructured.UnstructuredList{} + composites.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) + if err := ca.List(ctx, &composites, client.MatchingFields{compositeResourcesRefsIndex: key}); err != nil { + log.Debug("cannot list composite resources related to a MR change", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourcesRefsIndex+"="+key) + return + } + // queue those composites for reconciliation + for _, xr := range composites.Items { + log.Info("Enqueueing composite resource because managed resource changed", "name", xr.GetName(), "mrGVK", mrGVK.String(), "mrName", ev.ObjectNew.GetName()) + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: xr.GetName()}}) + } + } +} diff --git a/internal/controller/apiextensions/definition/handlers_test.go b/internal/controller/apiextensions/definition/handlers_test.go new file mode 100644 index 000000000..c9e407d25 --- /dev/null +++ b/internal/controller/apiextensions/definition/handlers_test.go @@ -0,0 +1,251 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package definition + +import ( + "context" + "reflect" + "testing" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" +) + +func TestEnqueueForCompositionRevisionFunc(t *testing.T) { + type args struct { + of schema.GroupVersionKind + list func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error + event runtimeevent.TypedCreateEvent[*v1.CompositionRevision] + } + type want struct { + added []interface{} + } + + dog := schema.GroupVersionKind{Group: "example.com", Version: "v1", Kind: "Dog"} + dogList := dog.GroupVersion().WithKind("DogList") + + tests := []struct { + name string + args args + want want + }{ + { + name: "empty", + args: args{ + of: dog, + list: func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error { + // test parameters only here, not in the later tests for brevity. 
+ u, ok := list.(*kunstructured.UnstructuredList) + if !ok { + t.Errorf("list was not an UnstructuredList") + } else if got := u.GroupVersionKind(); got != dogList { + t.Errorf("list was not a DogList, got: %s", got) + } + if len(opts) != 0 { + t.Errorf("unexpected list options: %#v", opts) + } + return nil + }, + event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + }, + { + name: "automatic management policy", + args: args{ + of: dog, + list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, + event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{ + added: []interface{}{reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: "ns", + Name: "obj1", + }}}, + }, + }, + { + name: "manual management policy", + args: args{ + of: dog, + list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateManual + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, + event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{}, + }, + { + name: "other composition", + args: args{ + of: dog, + list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, + event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{}, + }, + { + name: "multiple", + args: args{ + of: dog, + list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + automatic := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&automatic) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + obj2 := obj1.DeepCopy() + obj2.SetName("obj2") + + obj3 := obj1.DeepCopy() + obj3.SetName("obj3") + 
obj3.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) + + obj4 := obj1.DeepCopy() + obj4.SetName("obj4") + manual := xpv1.UpdateManual + obj4.SetCompositionUpdatePolicy(&manual) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{ + obj1.Unstructured, + obj2.Unstructured, + obj3.Unstructured, + } + + return nil + }, + event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{ + added: []interface{}{ + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj1"}}, + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj2"}}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fn := EnqueueForCompositionRevisionFunc(resource.CompositeKind(tt.args.of), tt.args.list, logging.NewNopLogger()) + q := rateLimitingQueueMock{} + fn(context.TODO(), tt.args.event, &q) + if got := q.added; !reflect.DeepEqual(got, tt.want.added) { + t.Errorf("EnqueueForCompositionRevisionFunc(...)(ctx, event, queue) = %v, want %v", got, tt.want) + } + }) + } +} + +type rateLimitingQueueMock struct { + workqueue.RateLimitingInterface + added []interface{} +} + +func (f *rateLimitingQueueMock) Add(item interface{}) { + f.added = append(f.added, item) +} diff --git a/internal/controller/apiextensions/definition/indexes.go b/internal/controller/apiextensions/definition/indexes.go index 6aaca2cd8..f4829dfa7 100644 --- a/internal/controller/apiextensions/definition/indexes.go +++ b/internal/controller/apiextensions/definition/indexes.go @@ -78,26 +78,3 @@ func IndexCompositeResourcesRefs(o client.Object) []string { func refKey(ns, name, kind, apiVersion string) string { return fmt.Sprintf("%s.%s.%s.%s", name, ns, kind, apiVersion) } - -// TODO(negz): Figure out a way to plumb this with controller-runtime v0.18.x -// style sources. 
- -// func enqueueXRsForMR(ca cache.Cache, xrGVK schema.GroupVersionKind, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { -// return func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { -// mrGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() -// key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), mrGVK.Kind, mrGVK.GroupVersion().String()) -// -// composites := kunstructured.UnstructuredList{} -// composites.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) -// if err := ca.List(ctx, &composites, client.MatchingFields{compositeResourcesRefsIndex: key}); err != nil { -// log.Debug("cannot list composite resources related to a MR change", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourcesRefsIndex+"="+key) -// return -// } -// -// // queue those composites for reconciliation -// for _, xr := range composites.Items { -// log.Info("Enqueueing composite resource because managed resource changed", "name", xr.GetName(), "mrGVK", mrGVK.String(), "mrName", ev.ObjectNew.GetName()) -// q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: xr.GetName()}}) -// } -// } -// } diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index bfd72a3ec..9ff199b8b 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -493,7 +493,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco engine.WatchFor(u, &handler.EnqueueRequestForObject{}), // enqueue composites whenever a matching CompositionRevision is created engine.WatchTriggeredBy(source.Kind(r.mgr.GetCache(), &v1.CompositionRevision{}, handler.TypedFuncs[*v1.CompositionRevision]{ - CreateFunc: composite.EnqueueForCompositionRevisionFunc(ck, r.mgr.GetCache().List, r.log), + CreateFunc: EnqueueForCompositionRevisionFunc(ck, r.mgr.GetCache().List, r.log), })), } From 5ce3eddc2e1f6d061c26af4f01c918766bf0c2b8 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 6 May 2024 16:46:56 -0700 Subject: [PATCH 210/370] Use a single cache for all dynamic controllers Crossplane uses a controller engine to dynamically start claim and XR controllers when a new XRD is installed. Before this commit, each controller gets at least one cache. This is because when I built this functionality, you couldn't stop a single informer within a cache (a cache is basically a map of informers by GVK). When realtime composition is enabled, there are even more caches. One per composed resource GVK. A GVK routed cache routes cache lookups to these various delegate caches. Meanwhile, controller-runtime recently made it possible to stop an informer within a cache. It's also been possible to remove an event handler from an informer for some time (since Kubernetes 1.26). https://github.com/kubernetes-sigs/controller-runtime/pull/2285 https://github.com/kubernetes-sigs/controller-runtime/pull/2046 This commit uses a single client, backed by a single cache, across all dynamic controllers (specifically the definition, offered, claim, and XR controllers). Compared to the current implementation, this commit: * Takes fewer global locks when realtime compositions are enabled. Locking is now mostly at the controller scope. * Works with the breaking changes to source.Source introduced in controller-runtime v0.18. 
:) I think this makes the realtime composition code a little easier to follow by consolodating it into the ControllerEngine, but that's pretty subjective. Signed-off-by: Nic Cope --- cmd/crossplane/core/core.go | 92 +- go.mod | 4 +- .../apiextensions/claim/reconciler.go | 13 +- .../apiextensions/claim/reconciler_test.go | 548 +++++---- .../composite/composition_functions.go | 4 - .../composite/composition_functions_test.go | 14 +- .../apiextensions/composite/composition_pt.go | 5 - .../apiextensions/composite/reconciler.go | 107 +- .../composite/reconciler_test.go | 353 +++--- .../apiextensions/composite/watch/watch.go | 153 +++ .../composite/watch/watch_test.go | 244 ++++ .../apiextensions/composition/reconciler.go | 5 +- .../apiextensions/controller/options.go | 4 + .../apiextensions/definition/composed.go | 287 ----- .../apiextensions/definition/handlers.go | 128 +- .../apiextensions/definition/handlers_test.go | 240 ++-- .../apiextensions/definition/indexes.go | 28 +- .../apiextensions/definition/indexes_test.go | 44 - .../apiextensions/definition/reconciler.go | 327 +++--- .../definition/reconciler_test.go | 1030 ++++++++--------- .../apiextensions/offered/reconciler.go | 169 ++- .../apiextensions/offered/reconciler_test.go | 858 +++++++++----- .../apiextensions/usage/reconciler.go | 6 + internal/controller/engine/cache.go | 305 ----- internal/controller/engine/engine.go | 291 ----- internal/controller/engine/engine_test.go | 221 ---- internal/engine/cache.go | 189 +++ internal/engine/cache_test.go | 166 +++ internal/engine/engine.go | 539 +++++++++ internal/engine/engine_test.go | 897 ++++++++++++++ internal/engine/source.go | 195 ++++ internal/engine/source_test.go | 272 +++++ 32 files changed, 4761 insertions(+), 2977 deletions(-) create mode 100644 internal/controller/apiextensions/composite/watch/watch.go create mode 100644 internal/controller/apiextensions/composite/watch/watch_test.go delete mode 100644 internal/controller/apiextensions/definition/composed.go delete mode 100644 internal/controller/engine/cache.go delete mode 100644 internal/controller/engine/engine.go delete mode 100644 internal/controller/engine/engine_test.go create mode 100644 internal/engine/cache.go create mode 100644 internal/engine/cache_test.go create mode 100644 internal/engine/engine.go create mode 100644 internal/engine/engine_test.go create mode 100644 internal/engine/source.go create mode 100644 internal/engine/source_test.go diff --git a/cmd/crossplane/core/core.go b/cmd/crossplane/core/core.go index 0a5545fed..7ad9b444d 100644 --- a/cmd/crossplane/core/core.go +++ b/cmd/crossplane/core/core.go @@ -21,6 +21,7 @@ import ( "context" "crypto/tls" "fmt" + "io" "os" "path/filepath" "time" @@ -30,6 +31,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" + kcache "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" @@ -44,11 +46,13 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/feature" "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" "github.com/crossplane/crossplane/internal/controller/apiextensions" apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" "github.com/crossplane/crossplane/internal/controller/pkg" pkgcontroller 
"github.com/crossplane/crossplane/internal/controller/pkg/controller" + "github.com/crossplane/crossplane/internal/engine" "github.com/crossplane/crossplane/internal/features" "github.com/crossplane/crossplane/internal/initializer" "github.com/crossplane/crossplane/internal/metrics" @@ -134,6 +138,8 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli Deduplicate: true, }) + // The claim and XR controllers don't use the manager's cache or client. + // They use their own. They're setup later in this method. eb := record.NewBroadcaster() mgr, err := ctrl.NewManager(ratelimiter.LimitRESTConfig(cfg, c.MaxReconcileRate), ctrl.Options{ Scheme: s, @@ -270,9 +276,91 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli log.Info("Alpha feature enabled", "flag", features.EnableAlphaClaimSSA) } + // Claim and XR controllers are started and stopped dynamically by the + // ControllerEngine below. When realtime compositions are enabled, they also + // start and stop their watches (e.g. of composed resources) dynamically. To + // do this, the ControllerEngine must have exclusive ownership of a cache. + // This allows it to track what controllers are using the cache's informers. + ca, err := cache.New(mgr.GetConfig(), cache.Options{ + HTTPClient: mgr.GetHTTPClient(), + Scheme: mgr.GetScheme(), + Mapper: mgr.GetRESTMapper(), + SyncPeriod: &c.SyncInterval, + + // When a CRD is deleted, any informers for its GVKs will start trying + // to restart their watches, and fail with scary errors. This should + // only happen when realtime composition is enabled, and we should GC + // the informer within 60 seconds. This handler tries to make the error + // a little more informative, and less scary. + DefaultWatchErrorHandler: func(_ *kcache.Reflector, err error) { + if errors.Is(io.EOF, err) { + // Watch closed normally. + return + } + log.Debug("Watch error - probably due to CRD being uninstalled", "error", err) + }, + }) + if err != nil { + return errors.Wrap(err, "cannot create cache for API extension controllers") + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + // Don't start the cache until the manager is elected. + <-mgr.Elected() + + if err := ca.Start(ctx); err != nil { + log.Info("API extensions cache returned an error", "error", err) + } + + log.Info("API extensions cache stopped") + }() + + cl, err := client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Scheme: mgr.GetScheme(), + Mapper: mgr.GetRESTMapper(), + Cache: &client.CacheOptions{ + Reader: ca, + + // Don't cache secrets - there may be a lot of them. + DisableFor: []client.Object{&corev1.Secret{}}, + + // Cache unstructured resources (like XRs and MRs) on Get and List. + Unstructured: true, + }, + }) + if err != nil { + return errors.Wrap(err, "cannot create client for API extension controllers") + } + + // It's important the engine's client is wrapped with unstructured.NewClient + // because controller-runtime always caches *unstructured.Unstructured, not + // our wrapper types like *composite.Unstructured. This client takes care of + // automatically wrapping and unwrapping *unstructured.Unstructured. + ce := engine.New(mgr, + engine.TrackInformers(ca, mgr.GetScheme()), + unstructured.NewClient(cl), + engine.WithLogger(log), + ) + + // TODO(negz): Garbage collect informers for CRs that are still defined + // (i.e. still have CRDs) but aren't used? 
Currently if an XR starts + // composing a kind of CR then stops, we won't stop the unused informer + // until the CRD that defines the CR is deleted. That could never happen. + // Consider for example composing two types of MR from the same provider, + // then updating to compose only one. + + // Garbage collect informers for custom resources when their CRD is deleted. + if err := ce.GarbageCollectCustomResourceInformers(ctx); err != nil { + return errors.Wrap(err, "cannot start garbage collector for custom resource informers") + } + ao := apiextensionscontroller.Options{ - Options: o, - FunctionRunner: functionRunner, + Options: o, + ControllerEngine: ce, + FunctionRunner: functionRunner, } if err := apiextensions.Setup(mgr, ao); err != nil { diff --git a/go.mod b/go.mod index 5e0fcdc4c..002770eb5 100644 --- a/go.mod +++ b/go.mod @@ -139,7 +139,7 @@ require ( github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.16.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.1 + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -151,7 +151,7 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.6.0 + github.com/google/uuid v1.6.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect diff --git a/internal/controller/apiextensions/claim/reconciler.go b/internal/controller/apiextensions/claim/reconciler.go index 061e74661..be4cc7d05 100644 --- a/internal/controller/apiextensions/claim/reconciler.go +++ b/internal/controller/apiextensions/claim/reconciler.go @@ -27,7 +27,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -37,7 +36,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/claim" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" @@ -221,14 +219,6 @@ func defaultCRClaim(c client.Client) crClaim { // A ReconcilerOption configures a Reconciler. type ReconcilerOption func(*Reconciler) -// WithClient specifies how the Reconciler should interact with the Kubernetes -// API. -func WithClient(c client.Client) ReconcilerOption { - return func(r *Reconciler) { - r.client = c - } -} - // WithManagedFieldsUpgrader specifies how the Reconciler should upgrade claim // and composite resource (XR) managed fields from client-side apply to // server-side apply. @@ -300,8 +290,7 @@ func WithPollInterval(after time.Duration) ReconcilerOption { // The returned Reconciler will apply only the ObjectMetaConfigurator by // default; most callers should supply one or more CompositeConfigurators to // configure their composite resources. 
-func NewReconciler(m manager.Manager, of resource.CompositeClaimKind, with resource.CompositeKind, o ...ReconcilerOption) *Reconciler { - c := unstructured.NewClient(m.GetClient()) +func NewReconciler(c client.Client, of resource.CompositeClaimKind, with resource.CompositeKind, o ...ReconcilerOption) *Reconciler { r := &Reconciler{ client: c, gvkClaim: schema.GroupVersionKind(of), diff --git a/internal/controller/apiextensions/claim/reconciler_test.go b/internal/controller/apiextensions/claim/reconciler_test.go index b27d52eca..f8a1ff899 100644 --- a/internal/controller/apiextensions/claim/reconciler_test.go +++ b/internal/controller/apiextensions/claim/reconciler_test.go @@ -28,7 +28,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -36,7 +35,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/claim" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" "github.com/crossplane/crossplane-runtime/pkg/test" @@ -47,10 +45,10 @@ func TestReconcile(t *testing.T) { now := metav1.Now() type args struct { - mgr manager.Manager - of resource.CompositeClaimKind - with resource.CompositeKind - opts []ReconcilerOption + client client.Client + of resource.CompositeClaimKind + with resource.CompositeKind + opts []ReconcilerOption } type want struct { r reconcile.Result @@ -65,11 +63,8 @@ func TestReconcile(t *testing.T) { "ClaimNotFound": { reason: "We should not return an error if the composite resource was not found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), }, }, want: want{ @@ -79,11 +74,8 @@ func TestReconcile(t *testing.T) { "GetClaimError": { reason: "We should return any error we encounter getting the claim.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), }, }, want: want{ @@ -94,19 +86,16 @@ func TestReconcile(t *testing.T) { "ReconciliationPaused": { reason: `If a claim has the pause annotation with value "true" we should stop reconciling and not requeue.`, args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - cm.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + cm.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) + })), }, }, want: want{ @@ -116,21 +105,20 @@ func TestReconcile(t *testing.T) { "ReconciliationUnpaused": { reason: "If a claim has the ReconcilePaused status condition but no paused annotation, the condition should change to ReconcileSuccess.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - // This claim was paused. - obj.(*claim.Unstructured).SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that our synced status condition changed - // from Paused to ReconcileSuccess. - cm.SetConditions(xpv1.ReconcileSuccess()) - cm.SetConditions(Waiting()) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + // This claim was paused. + obj.(*claim.Unstructured).SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that our synced status condition changed + // from Paused to ReconcileSuccess. + cm.SetConditions(xpv1.ReconcileSuccess()) + cm.SetConditions(Waiting()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -147,27 +135,24 @@ func TestReconcile(t *testing.T) { "GetCompositeError": { reason: "The reconcile should fail if we can't get the XR, unless it wasn't found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Return an error getting the XR. - return errBoom - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errGetComposite))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Return an error getting the XR. + return errBoom + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. 
+ cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errGetComposite))) + })), }, }, want: want{ @@ -177,29 +162,26 @@ func TestReconcile(t *testing.T) { "CompositeAlreadyBoundError": { reason: "The reconcile should fail if the referenced XR is bound to another claim", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // This XR was created, and references another - // claim. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{Name: "some-other-claim"}) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileError(errors.Errorf(errFmtUnbound, "", "some-other-claim"))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // This XR was created, and references another + // claim. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{Name: "some-other-claim"}) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileError(errors.Errorf(errFmtUnbound, "", "some-other-claim"))) + })), }, }, want: want{ @@ -209,31 +191,30 @@ func TestReconcile(t *testing.T) { "DeleteCompositeError": { reason: "We should not try to delete if the resource is already gone.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - o.SetDeletionTimestamp(&now) - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists. - o.SetCreationTimestamp(now) - } - return nil - }), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetDeletionTimestamp(&now) - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteComposite))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + o.SetDeletionTimestamp(&now) + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists. 
+ o.SetCreationTimestamp(now) + } + return nil }), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetDeletionTimestamp(&now) + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteComposite))) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -246,21 +227,20 @@ func TestReconcile(t *testing.T) { "UnpublishConnectionDetailsError": { reason: "The reconcile should fail if we can't unpublish the claim's connection details.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetDeletionTimestamp(&now) - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetDeletionTimestamp(&now) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteCDs))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetDeletionTimestamp(&now) + return nil }), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteCDs))) + })), + }, + opts: []ReconcilerOption{ WithConnectionUnpublisher(ConnectionUnpublisherFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ managed.ConnectionDetails) error { return errBoom })), @@ -273,21 +253,20 @@ func TestReconcile(t *testing.T) { "RemoveFinalizerError": { reason: "The reconcile should fail if we can't remove the claim's finalizer.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetDeletionTimestamp(&now) - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetDeletionTimestamp(&now) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetDeletionTimestamp(&now) + return nil }), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. 
+ cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }, }), @@ -300,21 +279,20 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should not requeue if we successfully delete the resource.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetDeletionTimestamp(&now) - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetDeletionTimestamp(&now) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileSuccess()) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetDeletionTimestamp(&now) + return nil }), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileSuccess()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -327,28 +305,27 @@ func TestReconcile(t *testing.T) { "SuccessfulForegroundDelete": { reason: "We should requeue if we successfully delete the bound composite resource using Foreground deletion", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - o.SetDeletionTimestamp(&now) - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - // We want to foreground delete. - fg := xpv1.CompositeDeleteForeground - o.SetCompositeDeletePolicy(&fg) - case *composite.Unstructured: - // Pretend the XR exists and is bound. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - } - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + o.SetDeletionTimestamp(&now) + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + // We want to foreground delete. + fg := xpv1.CompositeDeleteForeground + o.SetCompositeDeletePolicy(&fg) + case *composite.Unstructured: + // Pretend the XR exists and is bound. 
+ o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + } + return nil }), + MockDelete: test.NewMockDeleteFn(nil), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -361,40 +338,38 @@ func TestReconcile(t *testing.T) { "ForegroundDeleteWaitForCompositeDeletion": { reason: "We should requeue if we successfully deleted the bound composite resource using Foreground deletion and it has not yet been deleted", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - o.SetDeletionTimestamp(&now) - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - // We want to foreground delete. - fg := xpv1.CompositeDeleteForeground - o.SetCompositeDeletePolicy(&fg) - case *composite.Unstructured: - // Pretend the XR exists and is bound, but is - // being deleted. - o.SetCreationTimestamp(now) - o.SetDeletionTimestamp(&now) - o.SetClaimReference(&claim.Reference{}) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + o.SetDeletionTimestamp(&now) + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) // We want to foreground delete. fg := xpv1.CompositeDeleteForeground - cm.SetCompositeDeletePolicy(&fg) + o.SetCompositeDeletePolicy(&fg) + case *composite.Unstructured: + // Pretend the XR exists and is bound, but is + // being deleted. + o.SetCreationTimestamp(now) + o.SetDeletionTimestamp(&now) + o.SetClaimReference(&claim.Reference{}) + } + return nil + }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + // We want to foreground delete. + fg := xpv1.CompositeDeleteForeground + cm.SetCompositeDeletePolicy(&fg) - // Check that we set our status condition. - cm.SetDeletionTimestamp(&now) - cm.SetConditions(xpv1.Deleting()) - })), - }, - ), + // Check that we set our status condition. + cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -407,15 +382,14 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should fail the reconcile if we can't add the claim's finalizer", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) - })), - }), WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }, }), @@ -428,15 +402,14 @@ func TestReconcile(t *testing.T) { "SyncCompositeError": { reason: "We should fail the reconcile if we can't bind and sync the claim with a composite resource", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSync))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSync))) - })), - }), WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -450,31 +423,30 @@ func TestReconcile(t *testing.T) { "CompositeNotReady": { reason: "We should return early if the bound composite resource is not yet ready", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists and is bound, but is - // still being created. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - o.SetConditions(xpv1.Creating()) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileSuccess()) - cm.SetConditions(Waiting()) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists and is bound, but is + // still being created. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + o.SetConditions(xpv1.Creating()) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. 
+ cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileSuccess()) + cm.SetConditions(Waiting()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -488,29 +460,28 @@ func TestReconcile(t *testing.T) { "PropagateConnectionError": { reason: "We should fail the reconcile if we can't propagate the bound XR's connection details", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists and is available. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - o.SetConditions(xpv1.Available()) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPropagateCDs))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists and is available. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + o.SetConditions(xpv1.Available()) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPropagateCDs))) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -527,31 +498,30 @@ func TestReconcile(t *testing.T) { "SuccessfulReconcile": { reason: "We should not requeue if we successfully synced the composite resource and propagated its connection details", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists and is available. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - o.SetConditions(xpv1.Available()) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConnectionDetailsLastPublishedTime(&now) - cm.SetConditions(xpv1.ReconcileSuccess()) - cm.SetConditions(xpv1.Available()) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists and is available. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + o.SetConditions(xpv1.Available()) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConnectionDetailsLastPublishedTime(&now) + cm.SetConditions(xpv1.ReconcileSuccess()) + cm.SetConditions(xpv1.Available()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -569,7 +539,7 @@ func TestReconcile(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, tc.args.of, tc.args.with, tc.args.opts...) + r := NewReconciler(tc.args.client, tc.args.of, tc.args.with, tc.args.opts...) got, err := r.Reconcile(context.Background(), reconcile.Request{}) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index 31fb9e923..d3b214c65 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -237,10 +237,6 @@ func WithManagedFieldsUpgrader(u ManagedFieldsUpgrader) FunctionComposerOption { // NewFunctionComposer returns a new Composer that supports composing resources using // both Patch and Transform (P&T) logic and a pipeline of Composition Functions. func NewFunctionComposer(kube client.Client, r FunctionRunner, o ...FunctionComposerOption) *FunctionComposer { - // TODO(negz): Can we avoid double-wrapping if the supplied client is - // already wrapped? Or just do away with unstructured.NewClient completely? - kube = unstructured.NewClient(kube) - f := NewSecretConnectionDetailsFetcher(kube) c := &FunctionComposer{ diff --git a/internal/controller/apiextensions/composite/composition_functions_test.go b/internal/controller/apiextensions/composite/composition_functions_test.go index 0c5d524d0..062c71f4b 100644 --- a/internal/controller/apiextensions/composite/composition_functions_test.go +++ b/internal/controller/apiextensions/composite/composition_functions_test.go @@ -406,11 +406,12 @@ func TestFunctionCompose(t *testing.T) { kube: &test.MockClient{ MockPatch: test.NewMockPatchFn(nil, func(obj client.Object) error { // We only want to return an error for the XR. 
- u := obj.(*kunstructured.Unstructured) - if u.GetKind() == "CoolComposed" { - return nil + switch obj.(type) { + case *composite.Unstructured: + return errBoom + default: } - return errBoom + return nil }), }, r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { @@ -505,9 +506,10 @@ func TestFunctionCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil, func(obj client.Object) error { // We only want to return an error if we're patching a // composed resource. - u := obj.(*kunstructured.Unstructured) - if u.GetKind() == "UncoolComposed" { + switch obj.(type) { + case *composed.Unstructured: return errBoom + default: } return nil }), diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index 561a2b7f1..39b710c07 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -32,7 +32,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" @@ -126,10 +125,6 @@ type PTComposer struct { // NewPTComposer returns a Composer that composes resources using Patch and // Transform (P&T) Composition - a Composition's bases, patches, and transforms. func NewPTComposer(kube client.Client, o ...PTComposerOption) *PTComposer { - // TODO(negz): Can we avoid double-wrapping if the supplied client is - // already wrapped? Or just do away with unstructured.NewClient completely? - kube = unstructured.NewClient(kube) - c := &PTComposer{ client: resource.ClientApplicator{Client: kube, Applicator: resource.NewAPIPatchingApplicator(kube)}, diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index 5c5855c1e..47c13396c 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -28,7 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -38,10 +38,11 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + "github.com/crossplane/crossplane/internal/engine" ) const ( @@ -251,14 +252,6 @@ func WithPollInterval(interval time.Duration) ReconcilerOption { }) } -// WithClient specifies how the Reconciler should interact with the Kubernetes -// API. 
-func WithClient(c client.Client) ReconcilerOption { - return func(r *Reconciler) { - r.client = c - } -} - // WithCompositionRevisionFetcher specifies how the composition to be used should be // fetched. func WithCompositionRevisionFetcher(f CompositionRevisionFetcher) ReconcilerOption { @@ -332,11 +325,13 @@ func WithComposer(c Composer) ReconcilerOption { } } -// WithKindObserver specifies how the Reconciler should observe kinds for -// realtime events. -func WithKindObserver(o KindObserver) ReconcilerOption { +// WithWatchStarter specifies how the Reconciler should start watches for any +// resources it composes. +func WithWatchStarter(controllerName string, h handler.EventHandler, w WatchStarter) ReconcilerOption { return func(r *Reconciler) { - r.kindObserver = o + r.controllerName = controllerName + r.watchHandler = h + r.engine = w } } @@ -359,6 +354,27 @@ func (fn CompositionRevisionValidatorFn) Validate(c *v1.CompositionRevision) err return fn(c) } +// A WatchStarter can start a new watch. XR controllers use this to dynamically +// start watches when they compose new kinds of resources. +type WatchStarter interface { + // StartWatches starts the supplied watches, if they're not running already. + StartWatches(name string, ws ...engine.Watch) error +} + +// A NopWatchStarter does nothing. +type NopWatchStarter struct{} + +// StartWatches does nothing. +func (n *NopWatchStarter) StartWatches(_ string, _ ...engine.Watch) error { return nil } + +// A WatchStarterFn is a function that can start a new watch. +type WatchStarterFn func(name string, ws ...engine.Watch) error + +// StartWatches starts the supplied watches, if they're not running already. +func (fn WatchStarterFn) StartWatches(name string, ws ...engine.Watch) error { + return fn(name, ws...) +} + type environment struct { EnvironmentFetcher } @@ -371,34 +387,15 @@ type compositeResource struct { managed.ConnectionPublisher } -// KindObserver tracks kinds of referenced composed resources in composite -// resources in order to start watches for them for realtime events. -type KindObserver interface { - // WatchComposedResources starts a watch of the given kinds to trigger reconciles when - // a referenced object of those kinds changes. - WatchComposedResources(kind ...schema.GroupVersionKind) -} - -// KindObserverFunc implements KindObserver as a function. -type KindObserverFunc func(kind ...schema.GroupVersionKind) - -// WatchComposedResources starts a watch of the given kinds to trigger reconciles when -// a referenced object of those kinds changes. -func (fn KindObserverFunc) WatchComposedResources(kind ...schema.GroupVersionKind) { - fn(kind...) -} - // NewReconciler returns a new Reconciler of composite resources. 
-func NewReconciler(mgr manager.Manager, of resource.CompositeKind, opts ...ReconcilerOption) *Reconciler { - kube := unstructured.NewClient(mgr.GetClient()) - +func NewReconciler(c client.Client, of resource.CompositeKind, opts ...ReconcilerOption) *Reconciler { r := &Reconciler{ - client: kube, + client: c, gvk: schema.GroupVersionKind(of), revision: revision{ - CompositionRevisionFetcher: NewAPIRevisionFetcher(resource.ClientApplicator{Client: kube, Applicator: resource.NewAPIPatchingApplicator(kube)}), + CompositionRevisionFetcher: NewAPIRevisionFetcher(resource.ClientApplicator{Client: c, Applicator: resource.NewAPIPatchingApplicator(c)}), CompositionRevisionValidator: CompositionRevisionValidatorFn(func(rev *v1.CompositionRevision) error { // TODO(negz): Presumably this validation will eventually be // removed in favor of the new Composition validation @@ -417,18 +414,21 @@ func NewReconciler(mgr manager.Manager, of resource.CompositeKind, opts ...Recon }, composite: compositeResource{ - Finalizer: resource.NewAPIFinalizer(kube, finalizer), - CompositionSelector: NewAPILabelSelectorResolver(kube), + Finalizer: resource.NewAPIFinalizer(c, finalizer), + CompositionSelector: NewAPILabelSelectorResolver(c), EnvironmentSelector: NewNoopEnvironmentSelector(), - Configurator: NewConfiguratorChain(NewAPINamingConfigurator(kube), NewAPIConfigurator(kube)), + Configurator: NewConfiguratorChain(NewAPINamingConfigurator(c), NewAPIConfigurator(c)), // TODO(negz): In practice this is a filtered publisher that will // never filter any keys. Is there an unfiltered variant we could // use by default instead? - ConnectionPublisher: NewAPIFilteredSecretPublisher(kube, []string{}), + ConnectionPublisher: NewAPIFilteredSecretPublisher(c, []string{}), }, - resource: NewPTComposer(kube), + resource: NewPTComposer(c), + + // Dynamic watches are disabled by default. + engine: &NopWatchStarter{}, log: logging.NewNopLogger(), record: event.NewNopRecorder(), @@ -454,8 +454,12 @@ type Reconciler struct { revision revision composite compositeResource - resource Composer - kindObserver KindObserver + resource Composer + + // Used to dynamically start composed resource watches. + controllerName string + engine WatchStarter + watchHandler handler.EventHandler log logging.Logger record event.Recorder @@ -620,12 +624,17 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) } - if r.kindObserver != nil { - var gvks []schema.GroupVersionKind - for _, ref := range xr.GetResourceReferences() { - gvks = append(gvks, ref.GroupVersionKind()) - } - r.kindObserver.WatchComposedResources(gvks...) + ws := make([]engine.Watch, len(xr.GetResourceReferences())) + for i, ref := range xr.GetResourceReferences() { + ws[i] = engine.WatchFor(composed.New(composed.FromReference(ref)), engine.WatchTypeComposedResource, r.watchHandler) + } + + // StartWatches is a no-op unless the realtime compositions feature flag is + // enabled. When the flag is enabled, the ControllerEngine that starts this + // controller also starts a garbage collector for its watches. + if err := r.engine.StartWatches(r.controllerName, ws...); err != nil { + // TODO(negz): If we stop polling this will be a more serious error. + log.Debug("Cannot start watches for composed resources. 
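
As a rough usage sketch (not part of this patch), any WatchStarter implementation can be plugged in via WithWatchStarter; passing a nil handler mirrors the tests added below, and `c`, `xrGVK`, and `"composite/example"` are illustrative assumptions:

```go
// Sketch only: a custom WatchStarter supplied to the XR reconciler.
r := NewReconciler(c, resource.CompositeKind(xrGVK),
	WithWatchStarter("composite/example", nil, WatchStarterFn(
		func(name string, ws ...engine.Watch) error {
			// The real ControllerEngine starts these watches if they
			// aren't already running; a test can simply record them.
			return nil
		},
	)),
)
```
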
Relying on polling to know when they change.", "controller-name", r.controllerName, "error", err) } published, err := r.composite.PublishConnection(ctx, xr, res.ConnectionDetails) diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index 8a3f1152d..6d68d15c1 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ b/internal/controller/apiextensions/composite/reconciler_test.go @@ -28,7 +28,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -37,11 +36,12 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + "github.com/crossplane/crossplane/internal/engine" ) var _ Composer = ComposerSelectorFn(func(_ *v1.CompositionMode) Composer { return nil }) @@ -51,9 +51,9 @@ func TestReconcile(t *testing.T) { cd := managed.ConnectionDetails{"a": []byte("b")} type args struct { - mgr manager.Manager - of resource.CompositeKind - opts []ReconcilerOption + client client.Client + of resource.CompositeKind + opts []ReconcilerOption } type want struct { r reconcile.Result @@ -70,11 +70,8 @@ func TestReconcile(t *testing.T) { "CompositeResourceNotFound": { reason: "We should not return an error if the composite resource was not found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), }, }, want: want{ @@ -84,11 +81,8 @@ func TestReconcile(t *testing.T) { "GetCompositeResourceError": { reason: "We should return error encountered while getting the composite resource.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), }, }, want: want{ @@ -98,17 +92,16 @@ func TestReconcile(t *testing.T) { "UnpublishConnectionError": { reason: "We should return any error encountered while unpublishing connection details.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(want resource.Composite) { + want.SetDeletionTimestamp(&now) + want.SetConditions(xpv1.Deleting(), xpv1.ReconcileError(errors.Wrap(errBoom, errUnpublish))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(want resource.Composite) { - want.SetDeletionTimestamp(&now) - 
want.SetConditions(xpv1.Deleting(), xpv1.ReconcileError(errors.Wrap(errBoom, errUnpublish))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithConnectionPublishers(managed.ConnectionPublisherFns{ UnpublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) error { @@ -124,17 +117,16 @@ func TestReconcile(t *testing.T) { "RemoveFinalizerError": { reason: "We should return any error encountered while removing finalizer.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) - })), - }), WithCompositeFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom @@ -154,17 +146,16 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should return no error when deleted successfully.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileSuccess()) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileSuccess()) - })), - }), WithCompositeFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil @@ -184,14 +175,13 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should return any error encountered while adding finalizer.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) - })), - }), WithCompositeFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom @@ -206,14 +196,13 @@ func TestReconcile(t *testing.T) { "SelectCompositionError": { reason: "We should return any error encountered while selecting a composition.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { 
+ cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSelectComp))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSelectComp))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, _ resource.Composite) error { return errBoom @@ -227,15 +216,14 @@ func TestReconcile(t *testing.T) { "FetchCompositionError": { reason: "We should return any error encountered while fetching a composition.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errFetchComp))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errFetchComp))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -253,15 +241,14 @@ func TestReconcile(t *testing.T) { "ValidateCompositionError": { reason: "We should return any error encountered while validating our Composition.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errValidate))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errValidate))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -282,15 +269,14 @@ func TestReconcile(t *testing.T) { "ConfigureCompositeError": { reason: "We should return any error encountered while configuring the composite resource.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errConfigure))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errConfigure))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { 
cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -312,12 +298,11 @@ func TestReconcile(t *testing.T) { "SelectEnvironmentError": { reason: "We should return any error encountered while selecting the environment.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, _ resource.Composite) error { return nil @@ -340,12 +325,11 @@ func TestReconcile(t *testing.T) { "FetchEnvironmentError": { reason: "We should requeue on any error encountered while fetching the environment.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, _ resource.Composite) error { return nil @@ -368,15 +352,14 @@ func TestReconcile(t *testing.T) { "ComposeResourcesError": { reason: "We should return any error encountered while composing resources.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errCompose))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errCompose))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -401,15 +384,14 @@ func TestReconcile(t *testing.T) { "PublishConnectionDetailsError": { reason: "We should return any error encountered while publishing connection details.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPublish))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPublish))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -439,15 +421,14 @@ func TestReconcile(t *testing.T) { "CompositionWarnings": { reason: "We should not requeue if our Composer returned warning events.", args: args{ - mgr: 
&fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(xr resource.Composite) { + xr.SetCompositionReference(&corev1.ObjectReference{}) + xr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(xr resource.Composite) { - xr.SetCompositionReference(&corev1.ObjectReference{}) - xr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -482,15 +463,14 @@ func TestReconcile(t *testing.T) { "ComposedResourcesNotReady": { reason: "We should requeue if any of our composed resources are not yet ready.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Creating().WithMessage("Unready resources: cat, cow, elephant, and 1 more")) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Creating().WithMessage("Unready resources: cat, cow, elephant, and 1 more")) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -549,16 +529,24 @@ func TestReconcile(t *testing.T) { "ComposedResourcesReady": { reason: "We should requeue after our poll interval if all of our composed resources are ready.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetResourceReferences([]corev1.ObjectReference{{ + APIVersion: "example.org/v1", + Kind: "ComposedResource", + }}) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetResourceReferences([]corev1.ObjectReference{{ + APIVersion: "example.org/v1", + Kind: "ComposedResource", + }}) + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + cr.SetConnectionDetailsLastPublishedTime(&now) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - cr.SetConnectionDetailsLastPublishedTime(&now) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -586,6 +574,19 @@ func TestReconcile(t *testing.T) { return true, nil }, }), + WithWatchStarter("cool-controller", nil, WatchStarterFn(func(_ string, ws ...engine.Watch) error { + cd := composed.New(composed.FromReference(corev1.ObjectReference{ + 
APIVersion: "example.org/v1", + Kind: "ComposedResource", + })) + want := []engine.Watch{engine.WatchFor(cd, engine.WatchTypeComposedResource, nil)} + + if diff := cmp.Diff(want, ws, cmp.AllowUnexported(engine.Watch{})); diff != "" { + t.Errorf("StartWatches(...): -want, +got:\n%s", diff) + } + + return nil + })), }, }, want: want{ @@ -595,17 +596,14 @@ func TestReconcile(t *testing.T) { "ReconciliationPausedSuccessful": { reason: `If a composite resource has the pause annotation with value "true", there should be no further requeue requests.`, args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - cr.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) - })), - }), + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + cr.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) + })), }, }, want: want{ @@ -615,14 +613,11 @@ func TestReconcile(t *testing.T) { "ReconciliationPausedError": { reason: `If a composite resource has the pause annotation with value "true" and the status update due to reconciliation being paused fails, error should be reported causing an exponentially backed-off requeue.`, args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - })), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), - }), + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + })), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), }, }, want: want{ @@ -632,20 +627,19 @@ func TestReconcile(t *testing.T) { "ReconciliationResumes": { reason: `If a composite resource has the pause annotation with some value other than "true" and the Synced=False/ReconcilePaused status condition, reconciliation should resume with requeueing.`, args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) + cr.SetConditions(xpv1.ReconcilePaused()) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + cr.SetConnectionDetailsLastPublishedTime(&now) + cr.SetCompositionReference(&corev1.ObjectReference{}) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) - 
cr.SetConditions(xpv1.ReconcilePaused()) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - cr.SetConnectionDetailsLastPublishedTime(&now) - cr.SetCompositionReference(&corev1.ObjectReference{}) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -678,20 +672,19 @@ func TestReconcile(t *testing.T) { "ReconciliationResumesAfterAnnotationRemoval": { reason: `If a composite resource has the pause annotation removed and the Synced=False/ReconcilePaused status condition, reconciliation should resume with requeueing.`, args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + // no annotation atm + // (but reconciliations were already paused) + cr.SetConditions(xpv1.ReconcilePaused()) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + cr.SetConnectionDetailsLastPublishedTime(&now) + cr.SetCompositionReference(&corev1.ObjectReference{}) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - // no annotation atm - // (but reconciliations were already paused) - cr.SetConditions(xpv1.ReconcilePaused()) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - cr.SetConnectionDetailsLastPublishedTime(&now) - cr.SetCompositionReference(&corev1.ObjectReference{}) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -725,7 +718,7 @@ func TestReconcile(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, tc.args.of, tc.args.opts...) + r := NewReconciler(tc.args.client, tc.args.of, tc.args.opts...) got, err := r.Reconcile(context.Background(), reconcile.Request{}) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { diff --git a/internal/controller/apiextensions/composite/watch/watch.go b/internal/controller/apiextensions/composite/watch/watch.go new file mode 100644 index 000000000..262e120b0 --- /dev/null +++ b/internal/controller/apiextensions/composite/watch/watch.go @@ -0,0 +1,153 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package watch implements a garbage collector for composed resource watches. 
+package watch + +import ( + "context" + "time" + + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + + "github.com/crossplane/crossplane/internal/engine" +) + +// A ControllerEngine can get and stop watches for a controller. +type ControllerEngine interface { + GetWatches(name string) ([]engine.WatchID, error) + StopWatches(ctx context.Context, name string, ws ...engine.WatchID) (int, error) + GetClient() client.Client +} + +// A GarbageCollector garbage collects watches for a single composite resource +// (XR) controller. A watch is eligible for garbage collection when none of the +// XRs the controller owns resource references its GVK. The garbage collector +// periodically lists all of the controller's XRs to determine what GVKs they +// still reference. +type GarbageCollector struct { + controllerName string + xrGVK schema.GroupVersionKind + + engine ControllerEngine + + log logging.Logger +} + +// A GarbageCollectorOption configures a GarbageCollector. +type GarbageCollectorOption func(gc *GarbageCollector) + +// WithLogger configures how a GarbageCollector should log. +func WithLogger(l logging.Logger) GarbageCollectorOption { + return func(gc *GarbageCollector) { + gc.log = l + } +} + +// NewGarbageCollector creates a new watch garbage collector for a controller. +func NewGarbageCollector(name string, of resource.CompositeKind, ce ControllerEngine, o ...GarbageCollectorOption) *GarbageCollector { + gc := &GarbageCollector{ + controllerName: name, + xrGVK: schema.GroupVersionKind(of), + engine: ce, + log: logging.NewNopLogger(), + } + for _, fn := range o { + fn(gc) + } + return gc +} + +// GarbageCollectWatches runs garbage collection at the specified interval, +// until the supplied context is cancelled. It stops any watches for resource +// types that are no longer composed by any composite resource (XR). +func (gc *GarbageCollector) GarbageCollectWatches(ctx context.Context, interval time.Duration) { + t := time.NewTicker(interval) + defer t.Stop() + + for { + select { + case <-ctx.Done(): + gc.log.Debug("Stopping composite controller watch garbage collector", "error", ctx.Err()) + return + case <-t.C: + if err := gc.GarbageCollectWatchesNow(ctx); err != nil { + gc.log.Info("Cannot garbage collect composite controller watches", "error", err) + } + } + } +} + +// GarbageCollectWatchesNow stops any watches for resource types that are no +// longer composed by any composite resource (XR). It's safe to call from +// multiple goroutines. +func (gc *GarbageCollector) GarbageCollectWatchesNow(ctx context.Context) error { + // List all XRs of the type we're interested in. + l := &kunstructured.UnstructuredList{} + l.SetAPIVersion(gc.xrGVK.GroupVersion().String()) + l.SetKind(gc.xrGVK.Kind + "List") + if err := gc.engine.GetClient().List(ctx, l); err != nil { + return errors.Wrap(err, "cannot list composite resources") + } + + // Build the set of GVKs they still reference. 
+ used := make(map[engine.WatchID]bool) + for _, u := range l.Items { + xr := &composite.Unstructured{Unstructured: u} + for _, ref := range xr.GetResourceReferences() { + used[engine.WatchID{Type: engine.WatchTypeComposedResource, GVK: schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind)}] = true + } + } + + // Get the set of running watches. + running, err := gc.engine.GetWatches(gc.controllerName) + if err != nil { + return errors.Wrap(err, "cannot get running watches") + } + + stop := make([]engine.WatchID, 0) + for _, wid := range running { + if !used[wid] { + stop = append(stop, wid) + } + } + + // No need to call StopWatches if there's nothing to stop. + if len(stop) == 0 { + return nil + } + + // Stop any watches that are running, but not used. + // + // It's possible watches were started or stopped since we called GetWatches. + // That's fine. Stopping a watch that doesn't exist is a no-op, and if a + // watch was started that needs garbage collecting we'll get it eventually + // when GC runs again. + // + // It's also possible we'll stop a watch that's actually in use, if an XR + // started composing its GVK between when we built the map of used watches + // and here where we call StopWatches. It'll restart next time the XR + // controller calls StartWatches. + _, err = gc.engine.StopWatches(ctx, gc.controllerName, stop...) + return errors.Wrap(err, "cannot stop watches for composed resource types that are no longer referenced by any composite resource") +} diff --git a/internal/controller/apiextensions/composite/watch/watch_test.go b/internal/controller/apiextensions/composite/watch/watch_test.go new file mode 100644 index 000000000..af72b5129 --- /dev/null +++ b/internal/controller/apiextensions/composite/watch/watch_test.go @@ -0,0 +1,244 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watch + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + "github.com/crossplane/crossplane-runtime/pkg/test" + + "github.com/crossplane/crossplane/internal/engine" +) + +var _ ControllerEngine = &MockEngine{} + +type MockEngine struct { + MockGetWatches func(name string) ([]engine.WatchID, error) + MockStopWatches func(ctx context.Context, name string, ws ...engine.WatchID) (int, error) + MockGetClient func() client.Client +} + +func (m *MockEngine) GetWatches(name string) ([]engine.WatchID, error) { + return m.MockGetWatches(name) +} + +func (m *MockEngine) StopWatches(ctx context.Context, name string, ws ...engine.WatchID) (int, error) { + return m.MockStopWatches(ctx, name, ws...) +} + +func (m *MockEngine) GetClient() client.Client { + return m.MockGetClient() +} + +func TestGarbageCollectWatchesNow(t *testing.T) { + errBoom := errors.New("boom") + + type params struct { + name string + of resource.CompositeKind + ce ControllerEngine + o []GarbageCollectorOption + } + type args struct { + ctx context.Context + } + type want struct { + err error + } + + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "ListXRsError": { + reason: "The method should return an error if it can't list XRs.", + params: params{ + ce: &MockEngine{ + MockGetClient: func() client.Client { + return &test.MockClient{ + MockList: test.NewMockListFn(errBoom), + } + }, + }, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "GetWatchesError": { + reason: "The method should return an error if it can't get watches.", + params: params{ + ce: &MockEngine{ + MockGetClient: func() client.Client { + return &test.MockClient{ + MockList: test.NewMockListFn(nil), + } + }, + MockGetWatches: func(_ string) ([]engine.WatchID, error) { + return nil, errBoom + }, + }, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "StopWatchesError": { + reason: "The method should return an error if it can't stop watches.", + params: params{ + ce: &MockEngine{ + MockGetClient: func() client.Client { + return &test.MockClient{ + MockList: test.NewMockListFn(nil), + } + }, + MockGetWatches: func(_ string) ([]engine.WatchID, error) { + w := []engine.WatchID{ + { + Type: engine.WatchTypeComposedResource, + GVK: schema.GroupVersionKind{}, + }, + } + return w, nil + }, + MockStopWatches: func(_ context.Context, _ string, _ ...engine.WatchID) (int, error) { + return 0, errBoom + }, + }, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "NothingToStop": { + reason: "StopWatches shouldn't be called if there's no watches to stop.", + params: params{ + ce: &MockEngine{ + MockGetClient: func() client.Client { + return &test.MockClient{ + MockList: test.NewMockListFn(nil), + } + }, + MockGetWatches: func(_ string) ([]engine.WatchID, error) { + return nil, nil + }, + // StopWatches would panic if called, since it's not mocked. 
+ }, + }, + want: want{ + err: nil, + }, + }, + "UneededWatchesStopped": { + reason: "StopWatches shouldn't be called if there's no watches to stop.", + params: params{ + ce: &MockEngine{ + MockGetClient: func() client.Client { + return &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + xr := composite.New() + xr.SetResourceReferences([]corev1.ObjectReference{ + { + APIVersion: "example.org/v1", + Kind: "StillComposed", + // Name doesn't matter. + }, + }) + + obj.(*unstructured.UnstructuredList).Items = []unstructured.Unstructured{xr.Unstructured} + + return nil + }), + } + }, + MockGetWatches: func(_ string) ([]engine.WatchID, error) { + w := []engine.WatchID{ + // We want to keep this one. + { + Type: engine.WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "example.org", + Version: "v1", + Kind: "StillComposed", + }, + }, + // We want to GC this one. + { + Type: engine.WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "example.org", + Version: "v1", + Kind: "GarbageCollectMe", + }, + }, + } + return w, nil + }, + MockStopWatches: func(_ context.Context, _ string, ws ...engine.WatchID) (int, error) { + want := []engine.WatchID{ + { + Type: engine.WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "example.org", + Version: "v1", + Kind: "GarbageCollectMe", + }, + }, + } + + if diff := cmp.Diff(want, ws); diff != "" { + t.Errorf("\nMockStopWatches(...) -want, +got:\n%s", diff) + } + + return 0, nil + }, + }, + o: []GarbageCollectorOption{ + WithLogger(logging.NewNopLogger()), + }, + }, + want: want{ + err: nil, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + gc := NewGarbageCollector(tc.params.name, tc.params.of, tc.params.ce, tc.params.o...) + err := gc.GarbageCollectWatchesNow(tc.args.ctx) + + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ngc.GarbageCollectWatchesNow(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/internal/controller/apiextensions/composition/reconciler.go b/internal/controller/apiextensions/composition/reconciler.go index f090dae5c..92d771421 100644 --- a/internal/controller/apiextensions/composition/reconciler.go +++ b/internal/controller/apiextensions/composition/reconciler.go @@ -36,7 +36,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" @@ -98,10 +97,8 @@ func WithRecorder(er event.Recorder) ReconcilerOption { // NewReconciler returns a Reconciler of Compositions. 
func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - kube := unstructured.NewClient(mgr.GetClient()) - r := &Reconciler{ - client: kube, + client: mgr.GetClient(), log: logging.NewNopLogger(), record: event.NewNopRecorder(), } diff --git a/internal/controller/apiextensions/controller/options.go b/internal/controller/apiextensions/controller/options.go index 57c42c723..f0fe3799b 100644 --- a/internal/controller/apiextensions/controller/options.go +++ b/internal/controller/apiextensions/controller/options.go @@ -20,6 +20,7 @@ package controller import ( "github.com/crossplane/crossplane-runtime/pkg/controller" + "github.com/crossplane/crossplane/internal/engine" "github.com/crossplane/crossplane/internal/xfn" ) @@ -27,6 +28,9 @@ import ( type Options struct { controller.Options + // ControllerEngine used to dynamically start and stop controllers. + ControllerEngine *engine.ControllerEngine + // FunctionRunner used to run Composition Functions. FunctionRunner *xfn.PackagedFunctionRunner } diff --git a/internal/controller/apiextensions/definition/composed.go b/internal/controller/apiextensions/definition/composed.go deleted file mode 100644 index bb87b88b5..000000000 --- a/internal/controller/apiextensions/definition/composed.go +++ /dev/null @@ -1,287 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package definition - -import ( - "context" - "strings" - "sync" - - "github.com/google/uuid" - extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - kcache "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - cache "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/cluster" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/crossplane/crossplane-runtime/pkg/logging" - - "github.com/crossplane/crossplane/internal/controller/engine" - "github.com/crossplane/crossplane/internal/xcrd" -) - -// composedResourceInformers manages composed resource informers referenced by -// composite resources. It serves as an event source for realtime notifications -// of changed composed resources, with the composite reconcilers as sinks. -// It keeps composed resource informers alive as long as there are composites -// referencing them. In parallel, the composite reconcilers keep track of -// references to composed resources, and inform composedResourceInformers about -// them via the WatchComposedResources method. 
-type composedResourceInformers struct { - log logging.Logger - cluster cluster.Cluster - - handler handler.EventHandler - ps []predicate.Predicate - - gvkRoutedCache *engine.GVKRoutedCache - - lock sync.RWMutex // everything below is protected by this lock - - // cdCaches holds the composed resource informers. These are dynamically - // started and stopped based on the composites that reference them. - cdCaches map[schema.GroupVersionKind]cdCache - // xrCaches holds the composite resource informers. We use them to find - // composites referencing a certain composed resource GVK. If no composite - // is left doing so, a composed resource informer is stopped. - xrCaches map[schema.GroupVersionKind]cache.Cache - sinks map[string]func(ev runtimeevent.UpdateEvent) // by some uid -} - -type cdCache struct { - cache cache.Cache - cancelFn context.CancelFunc -} - -var _ source.Source = &composedResourceInformers{} - -// Start implements source.Source, i.e. starting composedResourceInformers as -// source with h as the sink of update events. It keeps sending events until -// ctx is done. -// Note that Start can be called multiple times to deliver events to multiple -// (composite resource) controllers. -func (i *composedResourceInformers) Start(ctx context.Context, q workqueue.RateLimitingInterface) error { - id := uuid.New().String() - - i.lock.Lock() - defer i.lock.Unlock() - i.sinks[id] = func(ev runtimeevent.UpdateEvent) { - for _, p := range i.ps { - if !p.Update(ev) { - return - } - } - i.handler.Update(ctx, ev, q) - } - - go func() { - <-ctx.Done() - - i.lock.Lock() - defer i.lock.Unlock() - delete(i.sinks, id) - }() - - return nil -} - -// RegisterComposite registers a composite resource cache with its GVK. -// Instances of this GVK will be considered to keep composed resource informers -// alive. -func (i *composedResourceInformers) RegisterComposite(gvk schema.GroupVersionKind, ca cache.Cache) { - i.lock.Lock() - defer i.lock.Unlock() - - if i.xrCaches == nil { - i.xrCaches = make(map[schema.GroupVersionKind]cache.Cache) - } - i.xrCaches[gvk] = ca -} - -// UnregisterComposite removes a composite resource cache from being considered -// to keep composed resource informers alive. -func (i *composedResourceInformers) UnregisterComposite(gvk schema.GroupVersionKind) { - i.lock.Lock() - defer i.lock.Unlock() - delete(i.xrCaches, gvk) -} - -// WatchComposedResources starts informers for the given composed resource GVKs. -// The is wired into the composite reconciler, which will call this method on -// every reconcile to make composedResourceInformers aware of the composed -// resources the given composite resource references. -// -// Note that this complements cleanupComposedResourceInformers which regularly -// garbage collects composed resource informers that are no longer referenced by -// any composite. -func (i *composedResourceInformers) WatchComposedResources(gvks ...schema.GroupVersionKind) { - i.lock.RLock() - defer i.lock.RUnlock() - - // start new informers - for _, gvk := range gvks { - if _, found := i.cdCaches[gvk]; found { - continue - } - - log := i.log.WithValues("gvk", gvk.String()) - - ca, err := cache.New(i.cluster.GetConfig(), cache.Options{}) - if err != nil { - log.Debug("failed creating a cache", "error", err) - continue - } - - // don't forget to call cancelFn in error cases to avoid leaks. In the - // happy case it's called from the go routine starting the cache below. 
- ctx, cancelFn := context.WithCancel(context.Background()) - - u := kunstructured.Unstructured{} - u.SetGroupVersionKind(gvk) - inf, err := ca.GetInformer(ctx, &u, cache.BlockUntilSynced(false)) // don't block. We wait in the go routine below. - if err != nil { - cancelFn() - log.Debug("failed getting informer", "error", err) - continue - } - - if _, err := inf.AddEventHandler(kcache.ResourceEventHandlerFuncs{ - UpdateFunc: func(oldObj, newObj interface{}) { - old := oldObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. - obj := newObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. - if old.GetResourceVersion() == obj.GetResourceVersion() { - return - } - - i.lock.RLock() - defer i.lock.RUnlock() - - ev := runtimeevent.UpdateEvent{ - ObjectOld: old, - ObjectNew: obj, - } - for _, handleFn := range i.sinks { - handleFn(ev) - } - }, - }); err != nil { - cancelFn() - log.Debug("failed adding event handler", "error", err) - continue - } - - go func() { - defer cancelFn() - - log.Info("Starting composed resource watch") - _ = ca.Start(ctx) - }() - - // TODO(negz): We should take a write lock before writing to this map. - i.cdCaches[gvk] = cdCache{ - cache: ca, - cancelFn: cancelFn, - } - - // wait for in the background, and only when synced add to the routed cache - go func(gvk schema.GroupVersionKind) { - if synced := ca.WaitForCacheSync(ctx); synced { - log.Debug("Composed resource cache synced") - i.gvkRoutedCache.AddDelegate(gvk, ca) - } - }(gvk) - } -} - -// cleanupComposedResourceInformers garbage collects composed resource informers -// that are no longer referenced by any composite resource. -// -// Note that this complements WatchComposedResources which starts informers for -// the composed resources referenced by a composite resource. -func (i *composedResourceInformers) cleanupComposedResourceInformers(ctx context.Context) { - crds := extv1.CustomResourceDefinitionList{} - if err := i.cluster.GetClient().List(ctx, &crds); err != nil { - i.log.Debug(errListCRDs, "error", err) - return - } - - // copy map to avoid locking it for the entire duration of the loop - xrCaches := make(map[schema.GroupVersionKind]cache.Cache, len(i.xrCaches)) - i.lock.RLock() - for gvk, ca := range i.xrCaches { - xrCaches[gvk] = ca - } - i.lock.RUnlock() - - // find all CRDs that some XR is referencing. This is O(CRDs * XRDs * versions). - // In practice, CRDs are 1000ish max, and XRDs are 10ish. So this is - // fast enough for now. It's all in-memory. 
- referenced := make(map[schema.GroupVersionKind]bool) - for _, crd := range crds.Items { - if !xcrd.IsEstablished(crd.Status) { - continue - } - - for _, v := range crd.Spec.Versions { - cdGVK := schema.GroupVersionKind{Group: crd.Spec.Group, Version: v.Name, Kind: crd.Spec.Names.Kind} - for xrGVK, xrCache := range xrCaches { - // list composites that reference this composed GVK - list := kunstructured.UnstructuredList{} - list.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) - if err := xrCache.List(ctx, &list, client.MatchingFields{compositeResourceRefGVKsIndex: cdGVK.String()}); err != nil { - i.log.Debug("cannot list composite resources referencing a certain composed resource GVK", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourceRefGVKsIndex+"="+cdGVK.String()) - continue - } - if len(list.Items) > 0 { - referenced[cdGVK] = true - } - } - } - } - - // stop old informers - for gvk, inf := range i.cdCaches { - if referenced[gvk] { - continue - } - inf.cancelFn() - i.gvkRoutedCache.RemoveDelegate(gvk) - i.log.Info("Stopped composed resource watch", "gvk", gvk.String()) - - // TODO(negz): We should take a write lock before writing to this map. - delete(i.cdCaches, gvk) - } -} - -func parseAPIVersion(v string) (string, string) { - parts := strings.SplitN(v, "/", 2) - switch len(parts) { - case 1: - return "", parts[0] - case 2: - return parts[0], parts[1] - default: - return "", "" - } -} diff --git a/internal/controller/apiextensions/definition/handlers.go b/internal/controller/apiextensions/definition/handlers.go index dc70b3680..d6f3fb1de 100644 --- a/internal/controller/apiextensions/definition/handlers.go +++ b/internal/controller/apiextensions/definition/handlers.go @@ -23,9 +23,9 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" - cache "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" + kevent "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -36,68 +36,80 @@ import ( v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" ) -// For comp rev -// EnqueueForCompositionRevisionFunc returns a function that enqueues (the -// related) XRs when a new CompositionRevision is created. This speeds up -// reconciliation of XRs on changes to the Composition by not having to wait for -// the 60s sync period, but be instant. -func EnqueueForCompositionRevisionFunc(of resource.CompositeKind, list func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error, log logging.Logger) func(ctx context.Context, createEvent runtimeevent.TypedCreateEvent[*v1.CompositionRevision], q workqueue.RateLimitingInterface) { - return func(ctx context.Context, createEvent runtimeevent.TypedCreateEvent[*v1.CompositionRevision], q workqueue.RateLimitingInterface) { - rev := createEvent.Object - - // get all XRs - xrs := kunstructured.UnstructuredList{} - xrs.SetGroupVersionKind(schema.GroupVersionKind(of)) - xrs.SetKind(schema.GroupVersionKind(of).Kind + "List") - if err := list(ctx, &xrs); err != nil { - // logging is most we can do here. This is a programming error if it happens. 
- log.Info("cannot list in CompositionRevision handler", "type", schema.GroupVersionKind(of).String(), "error", err) - return - } - - // enqueue all those that reference the Composition of this revision - compName := rev.Labels[v1.LabelCompositionName] - if compName == "" { - return - } - for _, u := range xrs.Items { - xr := composite.Unstructured{Unstructured: u} - - // only automatic - if pol := xr.GetCompositionUpdatePolicy(); pol != nil && *pol == xpv1.UpdateManual { - continue +// EnqueueForCompositionRevision enqueues reconciles for all XRs that will use a +// newly created CompositionRevision. +func EnqueueForCompositionRevision(of resource.CompositeKind, c client.Reader, log logging.Logger) handler.Funcs { + return handler.Funcs{ + CreateFunc: func(ctx context.Context, e kevent.CreateEvent, q workqueue.RateLimitingInterface) { + rev, ok := e.Object.(*v1.CompositionRevision) + if !ok { + // should not happen + return } - // only those that reference the right Composition - if ref := xr.GetCompositionReference(); ref == nil || ref.Name != compName { - continue + // TODO(negz): Check whether the revision's compositeTypeRef matches + // the supplied CompositeKind. If it doesn't, we can return early. + + // get all XRs + xrs := kunstructured.UnstructuredList{} + xrs.SetGroupVersionKind(schema.GroupVersionKind(of)) + xrs.SetKind(schema.GroupVersionKind(of).Kind + "List") + // TODO(negz): Index XRs by composition revision name? + if err := c.List(ctx, &xrs); err != nil { + // logging is most we can do here. This is a programming error if it happens. + log.Info("cannot list in CompositionRevision handler", "type", schema.GroupVersionKind(of).String(), "error", err) + return } - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: xr.GetName(), - Namespace: xr.GetNamespace(), - }}) - } + // enqueue all those that reference the Composition of this revision + compName := rev.Labels[v1.LabelCompositionName] + // TODO(negz): Check this before we get all XRs. + if compName == "" { + return + } + for _, u := range xrs.Items { + xr := composite.Unstructured{Unstructured: u} + + // only automatic + if pol := xr.GetCompositionUpdatePolicy(); pol != nil && *pol == xpv1.UpdateManual { + continue + } + + // only those that reference the right Composition + if ref := xr.GetCompositionReference(); ref == nil || ref.Name != compName { + continue + } + + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: xr.GetName(), + Namespace: xr.GetNamespace(), + }}) + } + }, } } -// TODO(negz): Figure out a way to plumb this with controller-runtime v0.18.x -// style sources. - -func enqueueXRsForMR(ca cache.Cache, xrGVK schema.GroupVersionKind, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { //nolint:unused // See comment above. 
- return func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { - mrGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() - key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), mrGVK.Kind, mrGVK.GroupVersion().String()) - composites := kunstructured.UnstructuredList{} - composites.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) - if err := ca.List(ctx, &composites, client.MatchingFields{compositeResourcesRefsIndex: key}); err != nil { - log.Debug("cannot list composite resources related to a MR change", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourcesRefsIndex+"="+key) - return - } - // queue those composites for reconciliation - for _, xr := range composites.Items { - log.Info("Enqueueing composite resource because managed resource changed", "name", xr.GetName(), "mrGVK", mrGVK.String(), "mrName", ev.ObjectNew.GetName()) - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: xr.GetName()}}) - } +// EnqueueCompositeResources enqueues reconciles for all XRs that reference an +// updated composed resource. +func EnqueueCompositeResources(of resource.CompositeKind, c client.Reader, log logging.Logger) handler.Funcs { + return handler.Funcs{ + UpdateFunc: func(ctx context.Context, ev kevent.UpdateEvent, q workqueue.RateLimitingInterface) { + xrGVK := schema.GroupVersionKind(of) + cdGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() + key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), cdGVK.Kind, cdGVK.GroupVersion().String()) + + composites := kunstructured.UnstructuredList{} + composites.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) + if err := c.List(ctx, &composites, client.MatchingFields{compositeResourcesRefsIndex: key}); err != nil { + log.Debug("cannot list composite resources related to a composed resource change", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourcesRefsIndex+"="+key) + return + } + + // queue those composites for reconciliation + for _, xr := range composites.Items { + log.Debug("Enqueueing composite resource because composed resource changed", "name", xr.GetName(), "cdGVK", cdGVK.String(), "cdName", ev.ObjectNew.GetName()) + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: xr.GetName()}}) + } + }, } } diff --git a/internal/controller/apiextensions/definition/handlers_test.go b/internal/controller/apiextensions/definition/handlers_test.go index c9e407d25..9992c0931 100644 --- a/internal/controller/apiextensions/definition/handlers_test.go +++ b/internal/controller/apiextensions/definition/handlers_test.go @@ -1,27 +1,10 @@ -/* -Copyright 2024 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - package definition import ( "context" - "reflect" "testing" - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/google/go-cmp/cmp" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -29,21 +12,23 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" + kevent "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/reconcile" + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" ) func TestEnqueueForCompositionRevisionFunc(t *testing.T) { type args struct { - of schema.GroupVersionKind - list func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error - event runtimeevent.TypedCreateEvent[*v1.CompositionRevision] + of schema.GroupVersionKind + reader client.Reader + event kevent.CreateEvent } type want struct { added []interface{} @@ -52,57 +37,51 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { dog := schema.GroupVersionKind{Group: "example.com", Version: "v1", Kind: "Dog"} dogList := dog.GroupVersion().WithKind("DogList") - tests := []struct { - name string - args args - want want + tests := map[string]struct { + reason string + args args + want want }{ - { - name: "empty", + "NoXRs": { + reason: "If there are no XRs of the specified type, no reconciles should be enqueued.", args: args{ of: dog, - list: func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error { - // test parameters only here, not in the later tests for brevity. - u, ok := list.(*kunstructured.UnstructuredList) - if !ok { - t.Errorf("list was not an UnstructuredList") - } else if got := u.GroupVersionKind(); got != dogList { - t.Errorf("list was not a DogList, got: %s", got) - } - if len(opts) != 0 { - t.Errorf("unexpected list options: %#v", opts) - } - return nil - }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error { + // test parameters only here, not in the later tests for brevity. 
+ u, ok := list.(*kunstructured.UnstructuredList) + if !ok { + t.Errorf("list was not an UnstructuredList") + } else if got := u.GroupVersionKind(); got != dogList { + t.Errorf("list was not a DogList, got: %s", got) + } + if len(opts) != 0 { + t.Errorf("unexpected list options: %#v", opts) + } + return nil }, }, }, }, - { - name: "automatic management policy", + "AutomaticManagementPolicy": { + reason: "A reconcile should be enqueued for XRs with an automatic revision update policy.", args: args{ of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&policy) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ + event: kevent.CreateEvent{ Object: &v1.CompositionRevision{ ObjectMeta: metav1.ObjectMeta{ Name: "dachshund-sadfa8", @@ -120,23 +99,25 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { }}}, }, }, - { - name: "manual management policy", + "ManualManagementPolicy": { + reason: "A reconcile shouldn't be enqueued for XRs with a manual revision update policy.", args: args{ of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateManual - obj1.SetCompositionUpdatePolicy(&policy) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateManual + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ + event: kevent.CreateEvent{ Object: &v1.CompositionRevision{ ObjectMeta: metav1.ObjectMeta{ Name: "dachshund-sadfa8", @@ -149,23 +130,25 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { }, want: want{}, }, - { - name: "other composition", + "OtherComposition": { + reason: "A reconcile shouldn't be enqueued for an XR that references a different Composition", args: args{ of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&policy) - 
obj1.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ + event: kevent.CreateEvent{ Object: &v1.CompositionRevision{ ObjectMeta: metav1.ObjectMeta{ Name: "dachshund-sadfa8", @@ -178,39 +161,41 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { }, want: want{}, }, - { - name: "multiple", + "Multiple": { + reason: "Reconciles should be enqueued only for the XRs that reference the relevant Composition, and have an automatic composition revision update policy.", args: args{ of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - automatic := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&automatic) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - obj2 := obj1.DeepCopy() - obj2.SetName("obj2") - - obj3 := obj1.DeepCopy() - obj3.SetName("obj3") - obj3.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) - - obj4 := obj1.DeepCopy() - obj4.SetName("obj4") - manual := xpv1.UpdateManual - obj4.SetCompositionUpdatePolicy(&manual) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{ - obj1.Unstructured, - obj2.Unstructured, - obj3.Unstructured, - } - - return nil + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + automatic := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&automatic) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + obj2 := obj1.DeepCopy() + obj2.SetName("obj2") + + obj3 := obj1.DeepCopy() + obj3.SetName("obj3") + obj3.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) + + obj4 := obj1.DeepCopy() + obj4.SetName("obj4") + manual := xpv1.UpdateManual + obj4.SetCompositionUpdatePolicy(&manual) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{ + obj1.Unstructured, + obj2.Unstructured, + obj3.Unstructured, + } + + return nil + }, }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ + event: kevent.CreateEvent{ Object: &v1.CompositionRevision{ ObjectMeta: metav1.ObjectMeta{ Name: "dachshund-sadfa8", @@ -229,13 +214,14 @@ func TestEnqueueForCompositionRevisionFunc(t *testing.T) { }, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fn := EnqueueForCompositionRevisionFunc(resource.CompositeKind(tt.args.of), tt.args.list, logging.NewNopLogger()) + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + fns := EnqueueForCompositionRevision(resource.CompositeKind(tc.args.of), tc.args.reader, logging.NewNopLogger()) q := rateLimitingQueueMock{} - fn(context.TODO(), tt.args.event, &q) - if got := q.added; !reflect.DeepEqual(got, tt.want.added) { - 
t.Errorf("EnqueueForCompositionRevisionFunc(...)(ctx, event, queue) = %v, want %v", got, tt.want) + fns.Create(context.TODO(), tc.args.event, &q) + + if diff := cmp.Diff(tc.want.added, q.added); diff != "" { + t.Errorf("\n%s\nfns.Create(...): -want, +got:\n%s", tc.reason, diff) } }) } diff --git a/internal/controller/apiextensions/definition/indexes.go b/internal/controller/apiextensions/definition/indexes.go index f4829dfa7..bfb622e0f 100644 --- a/internal/controller/apiextensions/definition/indexes.go +++ b/internal/controller/apiextensions/definition/indexes.go @@ -20,44 +20,18 @@ import ( "fmt" kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" ) const ( - // compositeResourceRefGVKsIndex is an index of all GroupKinds that - // are in use by a Composition. It indexes from spec.resourceRefs, not - // from spec.resources. Hence, it will also work with functions. - compositeResourceRefGVKsIndex = "compositeResourceRefsGVKs" // compositeResourcesRefsIndex is an index of resourceRefs that are owned // by a composite. compositeResourcesRefsIndex = "compositeResourcesRefs" ) -var ( - _ client.IndexerFunc = IndexCompositeResourceRefGVKs - _ client.IndexerFunc = IndexCompositeResourcesRefs -) - -// IndexCompositeResourceRefGVKs assumes the passed object is a composite. It -// returns gvk keys for every resource referenced in the composite. -func IndexCompositeResourceRefGVKs(o client.Object) []string { - u, ok := o.(*kunstructured.Unstructured) - if !ok { - return nil // should never happen - } - xr := composite.Unstructured{Unstructured: *u} - refs := xr.GetResourceReferences() - keys := make([]string, 0, len(refs)) - for _, ref := range refs { - group, version := parseAPIVersion(ref.APIVersion) - keys = append(keys, schema.GroupVersionKind{Group: group, Version: version, Kind: ref.Kind}.String()) - } - // unification is done by the informer. - return keys -} +var _ client.IndexerFunc = IndexCompositeResourcesRefs // IndexCompositeResourcesRefs assumes the passed object is a composite. It // returns keys for every composed resource referenced in the composite. 
diff --git a/internal/controller/apiextensions/definition/indexes_test.go b/internal/controller/apiextensions/definition/indexes_test.go index 3e725ddc0..40fc1315e 100644 --- a/internal/controller/apiextensions/definition/indexes_test.go +++ b/internal/controller/apiextensions/definition/indexes_test.go @@ -25,50 +25,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func TestIndexCompositeResourceRefGVKs(t *testing.T) { - type args struct { - object client.Object - } - tests := map[string]struct { - args args - want []string - }{ - "Nil": {args: args{object: nil}, want: nil}, - "NotUnstructured": {args: args{object: &corev1.Pod{}}, want: nil}, - "NoRefs": {args: args{object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "spec": map[string]interface{}{}, - }, - }}, want: []string{}}, - "References": {args: args{object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "spec": map[string]interface{}{ - "resourceRefs": []interface{}{ - map[string]interface{}{ - "apiVersion": "nop.crossplane.io/v1alpha1", - "kind": "NopResource", - "name": "mr", - }, - map[string]interface{}{ - "apiVersion": "nop.example.org/v1alpha1", - "kind": "NopResource", - "name": "xr", - }, - }, - }, - }, - }}, want: []string{"nop.crossplane.io/v1alpha1, Kind=NopResource", "nop.example.org/v1alpha1, Kind=NopResource"}}, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got := IndexCompositeResourceRefGVKs(tc.args.object) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("\n%s\nIndexCompositeResourceRefGVKs(...): -want, +got:\n%s", name, diff) - } - }) - } -} - func TestIndexCompositeResourcesRefs(t *testing.T) { type args struct { object client.Object diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 9ff199b8b..58ce14d66 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -28,19 +28,13 @@ import ( kmeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/cache" - kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/crossplane/crossplane-runtime/pkg/connection" "github.com/crossplane/crossplane-runtime/pkg/controller" @@ -51,13 +45,13 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/apis/secrets/v1alpha1" "github.com/crossplane/crossplane/internal/controller/apiextensions/composite" + "github.com/crossplane/crossplane/internal/controller/apiextensions/composite/watch" apiextensionscontroller 
"github.com/crossplane/crossplane/internal/controller/apiextensions/controller" - "github.com/crossplane/crossplane/internal/controller/engine" + "github.com/crossplane/crossplane/internal/engine" "github.com/crossplane/crossplane/internal/features" "github.com/crossplane/crossplane/internal/xcrd" ) @@ -72,6 +66,8 @@ const ( errApplyCRD = "cannot apply rendered composite resource CustomResourceDefinition" errUpdateStatus = "cannot update status of CompositeResourceDefinition" errStartController = "cannot start composite resource controller" + errStopController = "cannot stop composite resource controller" + errStartWatches = "cannot start composite resource controller watches" errAddIndex = "cannot add composite GVK index" errAddFinalizer = "cannot add composite resource finalizer" errRemoveFinalizer = "cannot remove composite resource finalizer" @@ -96,12 +92,50 @@ const ( ) // A ControllerEngine can start and stop Kubernetes controllers on demand. +// +//nolint:interfacebloat // We use this interface to stub the engine for testing, and we need all of its functionality. type ControllerEngine interface { + Start(name string, o ...engine.ControllerOption) error + Stop(ctx context.Context, name string) error IsRunning(name string) bool - Create(name string, o kcontroller.Options, w ...engine.Watch) (engine.NamedController, error) - Start(name string, o kcontroller.Options, w ...engine.Watch) error - Stop(name string) - Err(name string) error + GetWatches(name string) ([]engine.WatchID, error) + StartWatches(name string, ws ...engine.Watch) error + StopWatches(ctx context.Context, name string, ws ...engine.WatchID) (int, error) + GetClient() client.Client + GetFieldIndexer() client.FieldIndexer +} + +// A NopEngine does nothing. +type NopEngine struct{} + +// Start does nothing. +func (e *NopEngine) Start(_ string, _ ...engine.ControllerOption) error { return nil } + +// Stop does nothing. +func (e *NopEngine) Stop(_ context.Context, _ string) error { return nil } + +// IsRunning always returns true. +func (e *NopEngine) IsRunning(_ string) bool { return true } + +// GetWatches does nothing. +func (e *NopEngine) GetWatches(_ string) ([]engine.WatchID, error) { return nil, nil } + +// StartWatches does nothing. +func (e *NopEngine) StartWatches(_ string, _ ...engine.Watch) error { return nil } + +// StopWatches does nothing. +func (e *NopEngine) StopWatches(_ context.Context, _ string, _ ...engine.WatchID) (int, error) { + return 0, nil +} + +// GetClient returns a nil client. +func (e *NopEngine) GetClient() client.Client { + return nil +} + +// GetFieldIndexer returns a nil field indexer. +func (e *NopEngine) GetFieldIndexer() client.FieldIndexer { + return nil } // A CRDRenderer renders a CompositeResourceDefinition's corresponding @@ -125,24 +159,12 @@ func (fn CRDRenderFn) Render(d *v1.CompositeResourceDefinition) (*extv1.CustomRe func Setup(mgr ctrl.Manager, o apiextensionscontroller.Options) error { name := "defined/" + strings.ToLower(v1.CompositeResourceDefinitionGroupKind) - r := NewReconciler(mgr, + r := NewReconciler(NewClientApplicator(mgr.GetClient()), WithLogger(o.Logger.WithValues("controller", name)), WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + WithControllerEngine(o.ControllerEngine), WithOptions(o)) - if o.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - // Register a runnable regularly checking whether the watch composed - // resources are still referenced by composite resources. If not, the - // composed resource informer is stopped. 
- if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { - // Run every minute. - wait.UntilWithContext(ctx, r.xrInformers.cleanupComposedResourceInformers, time.Minute) - return nil - })); err != nil { - return errors.Wrap(err, errCannotAddInformerLoopToManager) - } - } - return ctrl.NewControllerManagedBy(mgr). Named(name). For(&v1.CompositeResourceDefinition{}). @@ -158,7 +180,6 @@ type ReconcilerOption func(*Reconciler) func WithLogger(log logging.Logger) ReconcilerOption { return func(r *Reconciler) { r.log = log - r.xrInformers.log = log } } @@ -189,7 +210,7 @@ func WithFinalizer(f resource.Finalizer) ReconcilerOption { // lifecycles of composite controllers. func WithControllerEngine(c ControllerEngine) ReconcilerOption { return func(r *Reconciler) { - r.composite.ControllerEngine = c + r.engine = c } } @@ -201,48 +222,29 @@ func WithCRDRenderer(c CRDRenderer) ReconcilerOption { } } -// WithClientApplicator specifies how the Reconciler should interact with the -// Kubernetes API. -func WithClientApplicator(ca resource.ClientApplicator) ReconcilerOption { - return func(r *Reconciler) { - r.client = ca - } -} - type definition struct { CRDRenderer - ControllerEngine resource.Finalizer } -// NewReconciler returns a Reconciler of CompositeResourceDefinitions. -func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - kube := unstructured.NewClient(mgr.GetClient()) - - ca := engine.NewGVKRoutedCache(mgr.GetScheme(), mgr.GetCache()) +// NewClientApplicator returns a ClientApplicator suitable for use by the +// definition controller. +func NewClientApplicator(c client.Client) resource.ClientApplicator { + // TODO(negz): Use server-side apply instead of a ClientApplicator. + return resource.ClientApplicator{Client: c, Applicator: resource.NewAPIUpdatingApplicator(c)} +} +// NewReconciler returns a Reconciler of CompositeResourceDefinitions. +func NewReconciler(ca resource.ClientApplicator, opts ...ReconcilerOption) *Reconciler { r := &Reconciler{ - mgr: mgr, - - client: resource.ClientApplicator{ - Client: kube, - Applicator: resource.NewAPIUpdatingApplicator(kube), - }, + client: ca, composite: definition{ - CRDRenderer: CRDRenderFn(xcrd.ForCompositeResource), - ControllerEngine: engine.New(mgr), - Finalizer: resource.NewAPIFinalizer(kube, finalizer), + CRDRenderer: CRDRenderFn(xcrd.ForCompositeResource), + Finalizer: resource.NewAPIFinalizer(ca, finalizer), }, - xrInformers: composedResourceInformers{ - log: logging.NewNopLogger(), - cluster: mgr, - - gvkRoutedCache: ca, - cdCaches: make(map[schema.GroupVersionKind]cdCache), - sinks: make(map[string]func(ev runtimeevent.UpdateEvent)), - }, + engine: &NopEngine{}, log: logging.NewNopLogger(), record: event.NewNopRecorder(), @@ -256,27 +258,24 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { f(r) } - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - // wrap the manager's cache to route requests to dynamically started - // informers for managed resources. - r.mgr = engine.WithGVKRoutedCache(ca, mgr) - } - return r } // A Reconciler reconciles CompositeResourceDefinitions. type Reconciler struct { + // This client should only be used by this XRD controller, not the XR + // controllers it manages. XR controllers should use the engine's client. + // This ensures XR controllers will use a client backed by the same cache + // used to power their watches. 
client resource.ClientApplicator - mgr manager.Manager composite definition + engine ControllerEngine + log logging.Logger record event.Recorder - xrInformers composedResourceInformers - options apiextensionscontroller.Options } @@ -337,11 +336,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // the (presumably exceedingly rare) latter case we'll orphan // the CRD. if !meta.WasCreated(crd) || !metav1.IsControlledBy(crd, d) { - // It's likely that we've already stopped this - // controller on a previous reconcile, but we try again - // just in case. This is a no-op if the controller was - // already stopped. - r.stopCompositeController(d) + // It's likely that we've already stopped this controller on a + // previous reconcile, but we try again just in case. This is a + // no-op if the controller was already stopped. + if err := r.engine.Stop(ctx, composite.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonTerminateXR, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource controller") if err := r.composite.RemoveFinalizer(ctx, d); err != nil { @@ -391,9 +393,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, nil } - // The controller should be stopped before the deletion of CRD - // so that it doesn't crash. - r.stopCompositeController(d) + // The controller must be stopped before the deletion of the CRD so that + // it doesn't crash. + if err := r.engine.Stop(ctx, composite.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonTerminateXR, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource controller") if err := r.client.Delete(ctx, crd); resource.IgnoreNotFound(err) != nil { @@ -441,36 +447,29 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, nil } - if err := r.composite.Err(composite.ControllerName(d.GetName())); err != nil { - log.Debug("Composite resource controller encountered an error", "error", err) - } - observed := d.Status.Controllers.CompositeResourceTypeRef desired := v1.TypeReferenceTo(d.GetCompositeGroupVersionKind()) - switch { - case observed.APIVersion != "" && observed != desired: - r.stopCompositeController(d) + if observed.APIVersion != "" && observed != desired { + if err := r.engine.Stop(ctx, composite.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonEstablishXR, err)) + return reconcile.Result{}, err + } log.Debug("Referenceable version changed; stopped composite resource controller", "observed-version", observed.APIVersion, "desired-version", desired.APIVersion) - case r.composite.IsRunning(composite.ControllerName(d.GetName())): + } + + if r.engine.IsRunning(composite.ControllerName(d.GetName())) { log.Debug("Composite resource controller is running") d.Status.SetConditions(v1.WatchingComposite()) return reconcile.Result{Requeue: false}, errors.Wrap(r.client.Status().Update(ctx, d), errUpdateStatus) - default: - if err := r.composite.Err(composite.ControllerName(d.GetName())); err != nil { - log.Debug("Composite resource controller encountered an error. Going to restart it", "error", err) - } else { - log.Debug("Composite resource controller is not running. 
Going to start it") - } } - ro := r.CompositeReconcilerOptions(d) + ro := r.CompositeReconcilerOptions(ctx, d) ck := resource.CompositeKind(d.GetCompositeGroupVersionKind()) - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - ro = append(ro, composite.WithKindObserver(composite.KindObserverFunc(r.xrInformers.WatchComposedResources))) - } - cr := composite.NewReconciler(r.mgr, ck, ro...) + + cr := composite.NewReconciler(r.engine.GetClient(), ck, ro...) ko := r.options.ForControllerRuntime() // Most controllers use this type of rate limiter to backoff requeues from 1 @@ -483,90 +482,61 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco ko.Reconciler = ratelimiter.NewReconciler(composite.ControllerName(d.GetName()), errors.WithSilentRequeueOnConflict(cr), r.options.GlobalRateLimiter) xrGVK := d.GetCompositeGroupVersionKind() - - u := &kunstructured.Unstructured{} - u.SetGroupVersionKind(xrGVK) - name := composite.ControllerName(d.GetName()) - var ca cache.Cache - watches := []engine.Watch{ - engine.WatchFor(u, &handler.EnqueueRequestForObject{}), - // enqueue composites whenever a matching CompositionRevision is created - engine.WatchTriggeredBy(source.Kind(r.mgr.GetCache(), &v1.CompositionRevision{}, handler.TypedFuncs[*v1.CompositionRevision]{ - CreateFunc: EnqueueForCompositionRevisionFunc(ck, r.mgr.GetCache().List, r.log), - })), + + // TODO(negz): Update CompositeReconcilerOptions to produce + // ControllerOptions instead? It bothers me that this is the only feature + // flagged block outside that method. + co := []engine.ControllerOption{engine.WithRuntimeOptions(ko)} + if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { + // If realtime composition is enabled we'll start watches dynamically, + // so we want to garbage collect watches for composed resource kinds + // that aren't used anymore. + gc := watch.NewGarbageCollector(name, resource.CompositeKind(xrGVK), r.engine, watch.WithLogger(log)) + co = append(co, engine.WithWatchGarbageCollector(gc)) } - // TODO(negz): I can't find a great way to plumb this. We now need to pass - // the handler when creating the source (i.e. the xrInformers). The - // xrInformers is designed to handle multiple types, though. Given I plan to - // try refactor realtime compositions to make use of new controller-runtime - // functionality around stopping informers, I'm going to just comment it out - // rather than spend time getting it working. - - // if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - // // enqueue XRs that when a relevant MR is updated - // watches = append(watches, engine.WatchTriggeredBy(&r.xrInformers, handler.TypedFuncs[*kunstructured.Unstructured]{ - // UpdateFunc: func(ctx context.Context, ev runtimeevent.UpdateEvent[*kunstructured.Unstructured], q workqueue.RateLimitingInterface) { - // enqueueXRsForMR(ca, xrGVK, log)(ctx, ev, q) - // }, - // })) - // } - - c, err := r.composite.Create(name, ko, watches...) - if err != nil { + if err := r.engine.Start(name, co...); err != nil { log.Debug(errStartController, "error", err) err = errors.Wrap(err, errStartController) r.record.Event(d, event.Warning(reasonEstablishXR, err)) return reconcile.Result{}, err } - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - ca = c.GetCache() - if err := ca.IndexField(ctx, u, compositeResourceRefGVKsIndex, IndexCompositeResourceRefGVKs); err != nil { - log.Debug(errAddIndex, "error", err) - // Nothing we can do. 
At worst, we won't have realtime updates. - } - if err := ca.IndexField(ctx, u, compositeResourcesRefsIndex, IndexCompositeResourcesRefs); err != nil { - log.Debug(errAddIndex, "error", err) - // Nothing we can do. At worst, we won't have realtime updates. - } - } - - if err := c.Start(context.Background()); err != nil { //nolint:contextcheck // the controller actually runs in the background. - log.Debug(errStartController, "error", err) - err = errors.Wrap(err, errStartController) + // This must be *unstructured.Unstructured, not *composite.Unstructured. + // controller-runtime doesn't support watching types that satisfy the + // runtime.Unstructured interface - only *unstructured.Unstructured. + xr := &kunstructured.Unstructured{} + xr.SetGroupVersionKind(xrGVK) + + crh := EnqueueForCompositionRevision(resource.CompositeKind(xrGVK), r.engine.GetClient(), log) + if err := r.engine.StartWatches(name, + engine.WatchFor(xr, engine.WatchTypeCompositeResource, &handler.EnqueueRequestForObject{}), + engine.WatchFor(&v1.CompositionRevision{}, engine.WatchTypeCompositionRevision, crh), + ); err != nil { + log.Debug(errStartWatches, "error", err) + err = errors.Wrap(err, errStartWatches) r.record.Event(d, event.Warning(reasonEstablishXR, err)) return reconcile.Result{}, err } - log.Debug("(Re)started composite resource controller") - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - r.xrInformers.RegisterComposite(xrGVK, ca) - } + log.Debug("Started composite resource controller") d.Status.Controllers.CompositeResourceTypeRef = v1.TypeReferenceTo(d.GetCompositeGroupVersionKind()) d.Status.SetConditions(v1.WatchingComposite()) return reconcile.Result{Requeue: false}, errors.Wrap(r.client.Status().Update(ctx, d), errUpdateStatus) } -func (r *Reconciler) stopCompositeController(d *v1.CompositeResourceDefinition) { - r.composite.Stop(composite.ControllerName(d.GetName())) - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - r.xrInformers.UnregisterComposite(d.GetCompositeGroupVersionKind()) - } -} - // CompositeReconcilerOptions builds the options for a composite resource // reconciler. The options vary based on the supplied feature flags. -func (r *Reconciler) CompositeReconcilerOptions(d *v1.CompositeResourceDefinition) []composite.ReconcilerOption { +func (r *Reconciler) CompositeReconcilerOptions(ctx context.Context, d *v1.CompositeResourceDefinition) []composite.ReconcilerOption { // The default set of reconciler options when no feature flags are enabled. 
o := []composite.ReconcilerOption{ - composite.WithConnectionPublishers(composite.NewAPIFilteredSecretPublisher(r.client, d.GetConnectionSecretKeys())), + composite.WithConnectionPublishers(composite.NewAPIFilteredSecretPublisher(r.engine.GetClient(), d.GetConnectionSecretKeys())), composite.WithCompositionSelector(composite.NewCompositionSelectorChain( composite.NewEnforcedCompositionSelector(*d, r.record), - composite.NewAPIDefaultCompositionSelector(r.client, *meta.ReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind), r.record), - composite.NewAPILabelSelectorResolver(r.client), + composite.NewAPIDefaultCompositionSelector(r.engine.GetClient(), *meta.ReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind), r.record), + composite.NewAPILabelSelectorResolver(r.engine.GetClient()), )), composite.WithLogger(r.log.WithValues("controller", composite.ControllerName(d.GetName()))), composite.WithRecorder(r.record.WithAnnotations("controller", composite.ControllerName(d.GetName()))), @@ -579,13 +549,13 @@ func (r *Reconciler) CompositeReconcilerOptions(d *v1.CompositeResourceDefinitio // subsequently skipped if the environment is nil. if r.options.Features.Enabled(features.EnableAlphaEnvironmentConfigs) { o = append(o, - composite.WithEnvironmentSelector(composite.NewAPIEnvironmentSelector(r.client)), - composite.WithEnvironmentFetcher(composite.NewAPIEnvironmentFetcher(r.client))) + composite.WithEnvironmentSelector(composite.NewAPIEnvironmentSelector(r.engine.GetClient())), + composite.WithEnvironmentFetcher(composite.NewAPIEnvironmentFetcher(r.engine.GetClient()))) } // If external secret stores aren't enabled we just fetch connection details // from Kubernetes secrets. - var fetcher managed.ConnectionDetailsFetcher = composite.NewSecretConnectionDetailsFetcher(r.client) + var fetcher managed.ConnectionDetailsFetcher = composite.NewSecretConnectionDetailsFetcher(r.engine.GetClient()) // We only want to enable ExternalSecretStore support if the relevant // feature flag is enabled. Otherwise, we start the XR reconcilers with @@ -595,46 +565,46 @@ func (r *Reconciler) CompositeReconcilerOptions(d *v1.CompositeResourceDefinitio // the composite resource. if r.options.Features.Enabled(features.EnableAlphaExternalSecretStores) { pc := []managed.ConnectionPublisher{ - composite.NewAPIFilteredSecretPublisher(r.client, d.GetConnectionSecretKeys()), - composite.NewSecretStoreConnectionPublisher(connection.NewDetailsManager(r.client, v1alpha1.StoreConfigGroupVersionKind, + composite.NewAPIFilteredSecretPublisher(r.engine.GetClient(), d.GetConnectionSecretKeys()), + composite.NewSecretStoreConnectionPublisher(connection.NewDetailsManager(r.engine.GetClient(), v1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), d.GetConnectionSecretKeys()), } // If external secret stores are enabled we need to support fetching // connection details from both secrets and external stores. 
fetcher = composite.ConnectionDetailsFetcherChain{ - composite.NewSecretConnectionDetailsFetcher(r.client), - connection.NewDetailsManager(r.client, v1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), + composite.NewSecretConnectionDetailsFetcher(r.engine.GetClient()), + connection.NewDetailsManager(r.engine.GetClient(), v1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), } cc := composite.NewConfiguratorChain( - composite.NewAPINamingConfigurator(r.client), - composite.NewAPIConfigurator(r.client), - composite.NewSecretStoreConnectionDetailsConfigurator(r.client), + composite.NewAPINamingConfigurator(r.engine.GetClient()), + composite.NewAPIConfigurator(r.engine.GetClient()), + composite.NewSecretStoreConnectionDetailsConfigurator(r.engine.GetClient()), ) o = append(o, composite.WithConnectionPublishers(pc...), composite.WithConfigurator(cc), - composite.WithComposer(composite.NewPTComposer(r.client, composite.WithComposedConnectionDetailsFetcher(fetcher)))) + composite.WithComposer(composite.NewPTComposer(r.engine.GetClient(), composite.WithComposedConnectionDetailsFetcher(fetcher)))) } // If Composition Functions are enabled we use two different Composer // implementations. One supports P&T (aka 'Resources mode') and the other // Functions (aka 'Pipeline mode'). if r.options.Features.Enabled(features.EnableBetaCompositionFunctions) { - ptc := composite.NewPTComposer(r.client, composite.WithComposedConnectionDetailsFetcher(fetcher)) + ptc := composite.NewPTComposer(r.engine.GetClient(), composite.WithComposedConnectionDetailsFetcher(fetcher)) fcopts := []composite.FunctionComposerOption{ - composite.WithComposedResourceObserver(composite.NewExistingComposedResourceObserver(r.client, fetcher)), + composite.WithComposedResourceObserver(composite.NewExistingComposedResourceObserver(r.engine.GetClient(), fetcher)), composite.WithCompositeConnectionDetailsFetcher(fetcher), } if r.options.Features.Enabled(features.EnableBetaCompositionFunctionsExtraResources) { - fcopts = append(fcopts, composite.WithExtraResourcesFetcher(composite.NewExistingExtraResourcesFetcher(r.client))) + fcopts = append(fcopts, composite.WithExtraResourcesFetcher(composite.NewExistingExtraResourcesFetcher(r.engine.GetClient()))) } - fc := composite.NewFunctionComposer(r.client, r.options.FunctionRunner, fcopts...) + fc := composite.NewFunctionComposer(r.engine.GetClient(), r.options.FunctionRunner, fcopts...) // Note that if external secret stores are enabled this will supersede // the WithComposer option specified in that block. @@ -657,5 +627,22 @@ func (r *Reconciler) CompositeReconcilerOptions(d *v1.CompositeResourceDefinitio }))) } + // If realtime compositions are enabled we pass the ControllerEngine to the + // XR reconciler so that it can start watches for composed resources. + if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { + gvk := d.GetCompositeGroupVersionKind() + u := &kunstructured.Unstructured{} + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) + + // Add an index to the controller engine's client. 
+ if err := r.engine.GetFieldIndexer().IndexField(ctx, u, compositeResourcesRefsIndex, IndexCompositeResourcesRefs); err != nil { + r.log.Debug(errAddIndex, "error", err) + } + + h := EnqueueCompositeResources(resource.CompositeKind(d.GetCompositeGroupVersionKind()), r.engine.GetClient(), r.log) + o = append(o, composite.WithWatchStarter(composite.ControllerName(d.GetName()), h, r.engine)) + } + return o } diff --git a/internal/controller/apiextensions/definition/reconciler_test.go b/internal/controller/apiextensions/definition/reconciler_test.go index e89f2322e..c92ca9b62 100644 --- a/internal/controller/apiextensions/definition/reconciler_test.go +++ b/internal/controller/apiextensions/definition/reconciler_test.go @@ -20,63 +20,70 @@ import ( "context" "testing" - "github.com/go-logr/logr" "github.com/google/go-cmp/cmp" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - ctrlconfig "sigs.k8s.io/controller-runtime/pkg/config" - kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/crossplane/crossplane-runtime/pkg/controller" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" - apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" - "github.com/crossplane/crossplane/internal/controller/engine" - "github.com/crossplane/crossplane/internal/features" + "github.com/crossplane/crossplane/internal/engine" +) + +var ( + _ ControllerEngine = &MockEngine{} + _ ControllerEngine = &NopEngine{} ) type MockEngine struct { - ControllerEngine - MockIsRunning func(name string) bool - MockCreate func(name string, o kcontroller.Options, w ...engine.Watch) (engine.NamedController, error) - MockStart func(name string, o kcontroller.Options, w ...engine.Watch) error - MockStop func(name string) - MockErr func(name string) error + MockStart func(name string, o ...engine.ControllerOption) error + MockStop func(ctx context.Context, name string) error + MockIsRunning func(name string) bool + MockGetWatches func(name string) ([]engine.WatchID, error) + MockStartWatches func(name string, ws ...engine.Watch) error + MockStopWatches func(ctx context.Context, name string, ws ...engine.WatchID) (int, error) + MockGetClient func() client.Client + MockGetFieldIndexer func() client.FieldIndexer } func (m *MockEngine) IsRunning(name string) bool { return m.MockIsRunning(name) } -func (m *MockEngine) Create(name string, o kcontroller.Options, w ...engine.Watch) (engine.NamedController, error) { - return m.MockCreate(name, o, w...) +func (m *MockEngine) Start(name string, o ...engine.ControllerOption) error { + return m.MockStart(name, o...) 
+} + +func (m *MockEngine) Stop(ctx context.Context, name string) error { + return m.MockStop(ctx, name) +} + +func (m *MockEngine) GetWatches(name string) ([]engine.WatchID, error) { + return m.MockGetWatches(name) +} + +func (m *MockEngine) StartWatches(name string, ws ...engine.Watch) error { + return m.MockStartWatches(name, ws...) } -func (m *MockEngine) Start(name string, o kcontroller.Options, w ...engine.Watch) error { - return m.MockStart(name, o, w...) +func (m *MockEngine) StopWatches(ctx context.Context, name string, ws ...engine.WatchID) (int, error) { + return m.MockStopWatches(ctx, name, ws...) } -func (m *MockEngine) Stop(name string) { - m.MockStop(name) +func (m *MockEngine) GetClient() client.Client { + return m.MockGetClient() } -func (m *MockEngine) Err(name string) error { - return m.MockErr(name) +func (m *MockEngine) GetFieldIndexer() client.FieldIndexer { + return m.MockGetFieldIndexer() } func TestReconcile(t *testing.T) { @@ -86,7 +93,7 @@ func TestReconcile(t *testing.T) { ctrlr := true type args struct { - mgr manager.Manager + ca resource.ClientApplicator opts []ReconcilerOption } type want struct { @@ -102,13 +109,10 @@ func TestReconcile(t *testing.T) { "CompositeResourceDefinitionNotFound": { reason: "We should not return an error if the CompositeResourceDefinition was not found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), + }, }, }, want: want{ @@ -118,13 +122,10 @@ func TestReconcile(t *testing.T) { "GetCompositeResourceDefinitionError": { reason: "We should return any other error encountered while getting a CompositeResourceDefinition.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), + }, }, }, want: want{ @@ -134,13 +135,12 @@ func TestReconcile(t *testing.T) { "RenderCustomResourceDefinitionError": { reason: "We should return any error we encounter rendering a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return nil, errBoom })), @@ -153,18 +153,17 @@ func TestReconcile(t *testing.T) { "SetTerminatingConditionError": { reason: "We should return any error we encounter while setting the terminating status condition.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + d := o.(*v1.CompositeResourceDefinition) + d.SetDeletionTimestamp(&now) + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - d := 
o.(*v1.CompositeResourceDefinition) - d.SetDeletionTimestamp(&now) - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -177,24 +176,23 @@ func TestReconcile(t *testing.T) { "GetCustomResourceDefinitionError": { reason: "We should return any error we encounter getting a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + return errBoom + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - return errBoom - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -204,24 +202,54 @@ func TestReconcile(t *testing.T) { err: errors.Wrap(errBoom, errGetCRD), }, }, - "RemoveFinalizerError": { - reason: "We should return any error we encounter while removing a finalizer.", + "CustomResourceDefinitionNotFoundStopControllerError": { + reason: "We should return any error we encounter while stopping our controller (just in case) when the CRD doesn't exist.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{}, nil + })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { + return errBoom }, }), + }, + }, + want: want{ + err: errors.Wrap(errBoom, errStopController), + }, + }, + "RemoveFinalizerError": { + reason: "We should return any error we encounter while removing a finalizer.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: 
test.NewMockSubResourceUpdateFn(nil), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -237,21 +265,20 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should not requeue when deleted if we successfully cleaned up our CRD and removed our finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -267,29 +294,28 @@ func TestReconcile(t *testing.T) { "DeleteAllCustomResourcesError": { reason: "We should return any error we encounter while deleting all defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -302,30 +328,29 @@ func TestReconcile(t *testing.T) { "ListCustomResourcesError": { reason: "We should return any error we encounter while listing all defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + 
d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -338,36 +363,35 @@ func TestReconcile(t *testing.T) { "WaitForDeleteAllOf": { reason: "We should record the pending deletion of defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { + v := o.(*unstructured.UnstructuredList) + *v = unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{{}, {}}, + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { - v := o.(*unstructured.UnstructuredList) - *v = unstructured.UnstructuredList{ - Items: []unstructured.Unstructured{{}, {}}, - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -377,34 +401,73 @@ func TestReconcile(t *testing.T) { r: reconcile.Result{Requeue: true}, }, }, - 
"DeleteCustomResourceDefinitionError": { - reason: "We should return any error we encounter while deleting the CRD we created.", + "StopControllerError": { + reason: "We should return any error we encounter while stopping our controller.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{}, nil + })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { + return errBoom }, }), + }, + }, + want: want{ + err: errors.Wrap(errBoom, errStopController), + }, + }, + "DeleteCustomResourceDefinitionError": { + reason: "We should return any error we encounter while deleting the CRD we created.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -417,42 +480,41 @@ func TestReconcile(t *testing.T) { "SuccessfulCleanup": { reason: "We should requeue to remove our finalizer once we've cleaned up our defined resources and CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case 
*v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.SetUID(owner) - want.SetDeletionTimestamp(&now) - want.Status.SetConditions(v1.TerminatingComposite()) - - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("MockStatusUpdate: -want, +got:\n%s\n", diff) - } + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.SetUID(owner) + want.SetDeletionTimestamp(&now) + want.Status.SetConditions(v1.TerminatingComposite()) + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("MockStatusUpdate: -want, +got:\n%s\n", diff) + } - return nil - }), - }, - }), + return nil + }), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -465,13 +527,12 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should return any error we encounter while adding a finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -487,16 +548,15 @@ func TestReconcile(t *testing.T) { "ApplyCustomResourceDefinitionError": { reason: "We should return any error we encounter while applying our CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return errBoom - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return errBoom }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, 
error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -512,16 +572,15 @@ func TestReconcile(t *testing.T) { "CustomResourceDefinitionIsNotEstablished": { reason: "We should requeue if we're waiting for a newly created CRD to become established.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return nil - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -534,32 +593,84 @@ func TestReconcile(t *testing.T) { r: reconcile.Result{Requeue: true}, }, }, - "CreateControllerError": { - reason: "We should return any error we encounter while starting our controller.", + "VersionChangedStopControllerError": { + reason: "We should return any error we encounter while stopping our controller because the XRD's referencable version changed.", args: args{ - mgr: &mockManager{ - GetCacheFn: func() cache.Cache { - return &mockCache{ - ListFn: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, - } - }, - GetClientFn: func() client.Client { - return &test.MockClient{MockList: test.NewMockListFn(nil)} - }, - GetSchemeFn: runtime.NewScheme, - GetRESTMapperFn: func() meta.RESTMapper { - return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + xrd := &v1.CompositeResourceDefinition{ + Spec: v1.CompositeResourceDefinitionSpec{ + Group: "example.org", + Names: extv1.CustomResourceDefinitionNames{ + Kind: "XR", + }, + Versions: []v1.CompositeResourceDefinitionVersion{ + { + Name: "v2", + Referenceable: true, + }, + { + Name: "v1", + }, + }, + }, + Status: v1.CompositeResourceDefinitionStatus{ + Controllers: v1.CompositeResourceDefinitionControllerStatus{ + CompositeResourceTypeRef: v1.TypeReference{ + APIVersion: "example.org/v1", + Kind: "XR", + }, + }, + }, + } + + *obj.(*v1.CompositeResourceDefinition) = *xrd + return nil + }), }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{ + Status: extv1.CustomResourceDefinitionStatus{ + Conditions: []extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, nil + })), + WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { + return nil + }}), + WithControllerEngine(&MockEngine{ + MockIsRunning: func(_ string) bool { return false }, + MockStop: func(_ context.Context, _ string) error { + return errBoom }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ 
...resource.ApplyOption) error { - return nil - }), }), + }, + }, + want: want{ + r: reconcile.Result{}, + err: errors.Wrap(errBoom, errStopController), + }, + }, + "StartControllerError": { + reason: "We should return any error we encounter while starting our controller.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -574,10 +685,10 @@ func TestReconcile(t *testing.T) { }}), WithControllerEngine(&MockEngine{ MockIsRunning: func(_ string) bool { return false }, - MockErr: func(_ string) error { return nil }, - MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { - return nil, errBoom + MockStart: func(_ string, _ ...engine.ControllerOption) error { + return errBoom }, + MockGetClient: func() client.Client { return test.NewMockClient() }, }), }, }, @@ -586,41 +697,68 @@ func TestReconcile(t *testing.T) { err: errors.Wrap(errBoom, errStartController), }, }, - "SuccessfulStart": { - reason: "We should return without requeueing if we successfully ensured our CRD exists and controller is started.", + "StartWatchesError": { + reason: "We should return any error we encounter while starting watches.", args: args{ - mgr: &mockManager{ - GetCacheFn: func() cache.Cache { - return &mockCache{ - ListFn: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, - } - }, - GetClientFn: func() client.Client { - return &test.MockClient{MockList: test.NewMockListFn(nil)} - }, - GetSchemeFn: runtime.NewScheme, - GetRESTMapperFn: func() meta.RESTMapper { - return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Status.SetConditions(v1.WatchingComposite()) - - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{ + Status: extv1.CustomResourceDefinitionStatus{ + Conditions: []extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, nil + })), + WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { + return nil + }}), + WithControllerEngine(&MockEngine{ + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { + return nil + }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { + return errBoom }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + 
MockGetClient: func() client.Client { return test.NewMockClient() }, + }), + }, + }, + want: want{ + r: reconcile.Result{}, + err: errors.Wrap(errBoom, errStartWatches), + }, + }, + "SuccessfulStart": { + reason: "We should return without requeueing if we successfully ensured our CRD exists and controller is started.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Status.SetConditions(v1.WatchingComposite()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -634,23 +772,10 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockIsRunning: func(_ string) bool { return false }, - MockErr: func(_ string) error { return errBoom }, // This error should only be logged. - MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { - return mockNamedController{ - MockStart: func(_ context.Context) error { return nil }, - MockGetCache: func() cache.Cache { - return &mockCache{ - IndexFieldFn: func(_ context.Context, _ client.Object, _ string, _ client.IndexerFunc) error { - return nil - }, - WaitForCacheSyncFn: func(_ context.Context) bool { - return true - }, - } - }, - }, nil - }, + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, + MockGetClient: func() client.Client { return test.NewMockClient() }, }), }, }, @@ -661,51 +786,37 @@ func TestReconcile(t *testing.T) { "SuccessfulUpdateControllerVersion": { reason: "We should return without requeueing if we successfully ensured our CRD exists, the old controller stopped, and the new one started.", args: args{ - mgr: &mockManager{ - GetCacheFn: func() cache.Cache { - return &mockCache{ - ListFn: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, - } - }, - GetClientFn: func() client.Client { - return &test.MockClient{MockList: test.NewMockListFn(nil)} - }, - GetSchemeFn: runtime.NewScheme, - GetRESTMapperFn: func() meta.RESTMapper { - return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) - }, - }, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - d := obj.(*v1.CompositeResourceDefinition) - d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - d.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "old"} - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - 
want.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "new"} - want.Status.SetConditions(v1.WatchingComposite()) - - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + d := obj.(*v1.CompositeResourceDefinition) + d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + d.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "old"} + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + want.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "new"} + want.Status.SetConditions(v1.WatchingComposite()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -719,23 +830,11 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return nil }, - MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { - return mockNamedController{ - MockStart: func(_ context.Context) error { return nil }, - MockGetCache: func() cache.Cache { - return &mockCache{ - IndexFieldFn: func(_ context.Context, _ client.Object, _ string, _ client.IndexerFunc) error { - return nil - }, - WaitForCacheSyncFn: func(_ context.Context) bool { - return true - }, - } - }, - }, nil - }, - MockStop: func(_ string) {}, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStop: func(_ context.Context, _ string) error { return nil }, + MockIsRunning: func(_ string) bool { return false }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, + MockGetClient: func() client.Client { return test.NewMockClient() }, }), }, }, @@ -746,38 +845,24 @@ func TestReconcile(t *testing.T) { "NotRestartingWithoutVersionChange": { reason: "We should return without requeueing if we successfully ensured our CRD exists and controller is started.", args: args{ - mgr: &mockManager{ - GetCacheFn: func() cache.Cache { - return &mockCache{ - ListFn: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, - } - }, - GetClientFn: func() client.Client { - return &test.MockClient{MockList: test.NewMockListFn(nil)} - }, - GetSchemeFn: runtime.NewScheme, - GetRESTMapperFn: func() meta.RESTMapper { - return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) - }, - }, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o 
client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Status.SetConditions(v1.WatchingComposite()) - - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Status.SetConditions(v1.WatchingComposite()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -792,10 +877,9 @@ func TestReconcile(t *testing.T) { }}), WithControllerEngine(&MockEngine{ MockIsRunning: func(_ string) bool { return true }, - MockErr: func(_ string) error { return errBoom }, // This error should only be logged. - MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { - t.Errorf("MockCreate should not be called") - return nil, nil + MockStart: func(_ string, _ ...engine.ControllerOption) error { + t.Errorf("MockStart should not be called") + return nil }, }), }, @@ -806,105 +890,17 @@ func TestReconcile(t *testing.T) { }, } - // Run every test with and without the realtime compositions feature. - rtc := apiextensionscontroller.Options{Options: controller.DefaultOptions()} - rtc.Features.Enable(features.EnableAlphaRealtimeCompositions) + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + r := NewReconciler(tc.args.ca, tc.args.opts...) + got, err := r.Reconcile(context.Background(), reconcile.Request{}) - type mode struct { - name string - extra []ReconcilerOption - } - for _, m := range []mode{ - {name: "Default"}, - {name: string(features.EnableAlphaRealtimeCompositions), extra: []ReconcilerOption{WithOptions(rtc)}}, - } { - t.Run(m.name, func(t *testing.T) { - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, append(tc.args.opts, m.extra...)...) 
- got, err := r.Reconcile(context.Background(), reconcile.Request{}) - - if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { - t.Errorf("\n%s\nr.Reconcile(...): -want error, +got error:\n%s", tc.reason, diff) - } - if diff := cmp.Diff(tc.want.r, got, test.EquateErrors()); diff != "" { - t.Errorf("\n%s\nr.Reconcile(...): -want, +got:\n%s", tc.reason, diff) - } - }) + if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nr.Reconcile(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.r, got, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nr.Reconcile(...): -want, +got:\n%s", tc.reason, diff) } }) } } - -type mockNamedController struct { - MockStart func(_ context.Context) error - MockGetCache func() cache.Cache -} - -func (m mockNamedController) Start(ctx context.Context) error { - return m.MockStart(ctx) -} - -func (m mockNamedController) GetCache() cache.Cache { - return m.MockGetCache() -} - -type mockManager struct { - manager.Manager - - GetCacheFn func() cache.Cache - GetClientFn func() client.Client - GetSchemeFn func() *runtime.Scheme - GetRESTMapperFn func() meta.RESTMapper - GetConfigFn func() *rest.Config - GetLoggerFn func() logr.Logger - GetControllerOptionsFn func() ctrlconfig.Controller -} - -func (m *mockManager) GetCache() cache.Cache { - return m.GetCacheFn() -} - -func (m *mockManager) GetClient() client.Client { - return m.GetClientFn() -} - -func (m *mockManager) GetScheme() *runtime.Scheme { - return m.GetSchemeFn() -} - -func (m *mockManager) GetRESTMapper() meta.RESTMapper { - return m.GetRESTMapperFn() -} - -func (m *mockManager) GetConfig() *rest.Config { - return m.GetConfigFn() -} - -func (m *mockManager) GetLogger() logr.Logger { - return m.GetLoggerFn() -} - -func (m *mockManager) GetControllerOptions() ctrlconfig.Controller { - return m.GetControllerOptionsFn() -} - -type mockCache struct { - cache.Cache - - ListFn func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error - IndexFieldFn func(_ context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error - WaitForCacheSyncFn func(_ context.Context) bool -} - -func (m *mockCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - return m.ListFn(ctx, list, opts...) 
-} - -func (m *mockCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - return m.IndexFieldFn(ctx, obj, field, extractValue) -} - -func (m *mockCache) WaitForCacheSync(ctx context.Context) bool { - return m.WaitForCacheSyncFn(ctx) -} diff --git a/internal/controller/apiextensions/offered/reconciler.go b/internal/controller/apiextensions/offered/reconciler.go index 1c5d0e2a9..86dc59fe7 100644 --- a/internal/controller/apiextensions/offered/reconciler.go +++ b/internal/controller/apiextensions/offered/reconciler.go @@ -31,9 +31,8 @@ import ( "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" - kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crossplane/crossplane-runtime/pkg/connection" @@ -44,13 +43,12 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" secretsv1alpha1 "github.com/crossplane/crossplane/apis/secrets/v1alpha1" "github.com/crossplane/crossplane/internal/controller/apiextensions/claim" apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" - "github.com/crossplane/crossplane/internal/controller/engine" + "github.com/crossplane/crossplane/internal/engine" "github.com/crossplane/crossplane/internal/features" "github.com/crossplane/crossplane/internal/names" "github.com/crossplane/crossplane/internal/xcrd" @@ -69,6 +67,8 @@ const ( errApplyCRD = "cannot apply rendered composite resource claim CustomResourceDefinition" errUpdateStatus = "cannot update status of CompositeResourceDefinition" errStartController = "cannot start composite resource claim controller" + errStopController = "cannot stop composite resource claim controller" + errStartWatches = "cannot start composite resource claim controller watches" errAddFinalizer = "cannot add composite resource claim finalizer" errRemoveFinalizer = "cannot remove composite resource claim finalizer" errDeleteCRD = "cannot delete composite resource claim CustomResourceDefinition" @@ -91,10 +91,36 @@ const ( // A ControllerEngine can start and stop Kubernetes controllers on demand. type ControllerEngine interface { + Start(name string, o ...engine.ControllerOption) error + Stop(ctx context.Context, name string) error IsRunning(name string) bool - Start(name string, o kcontroller.Options, w ...engine.Watch) error - Stop(name string) - Err(name string) error + StartWatches(name string, ws ...engine.Watch) error + GetClient() client.Client +} + +// A NopEngine does nothing. +type NopEngine struct{} + +// Start does nothing. +func (e *NopEngine) Start(_ string, _ ...engine.ControllerOption) error { return nil } + +// Stop does nothing. +func (e *NopEngine) Stop(_ context.Context, _ string) error { return nil } + +// IsRunning always returns true. +func (e *NopEngine) IsRunning(_ string) bool { return true } + +// StartWatches does nothing. +func (e *NopEngine) StartWatches(_ string, _ ...engine.Watch) error { return nil } + +// GetClient returns a nil client. 
+func (e *NopEngine) GetClient() client.Client { + return nil +} + +// GetFieldIndexer returns a nil field indexer. +func (e *NopEngine) GetFieldIndexer() client.FieldIndexer { + return nil } // A CRDRenderer renders a CompositeResourceDefinition's corresponding @@ -119,9 +145,10 @@ func (fn CRDRenderFn) Render(d *v1.CompositeResourceDefinition) (*extv1.CustomRe func Setup(mgr ctrl.Manager, o apiextensionscontroller.Options) error { name := "offered/" + strings.ToLower(v1.CompositeResourceDefinitionGroupKind) - r := NewReconciler(mgr, + r := NewReconciler(NewClientApplicator(mgr.GetClient()), WithLogger(o.Logger.WithValues("controller", name)), WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + WithControllerEngine(o.ControllerEngine), WithOptions(o)) return ctrl.NewControllerManagedBy(mgr). @@ -169,7 +196,7 @@ func WithFinalizer(f resource.Finalizer) ReconcilerOption { // lifecycles of claim controllers. func WithControllerEngine(c ControllerEngine) ReconcilerOption { return func(r *Reconciler) { - r.claim.ControllerEngine = c + r.engine = c } } @@ -181,32 +208,25 @@ func WithCRDRenderer(c CRDRenderer) ReconcilerOption { } } -// WithClientApplicator specifies how the Reconciler should interact with the -// Kubernetes API. -func WithClientApplicator(ca resource.ClientApplicator) ReconcilerOption { - return func(r *Reconciler) { - r.client = ca - } +// NewClientApplicator returns a ClientApplicator suitable for use by the +// offered controller. +func NewClientApplicator(c client.Client) resource.ClientApplicator { + // TODO(negz): Use server-side apply instead of a ClientApplicator. + return resource.ClientApplicator{Client: c, Applicator: resource.NewAPIUpdatingApplicator(c)} } // NewReconciler returns a Reconciler of CompositeResourceDefinitions. -func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - kube := unstructured.NewClient(mgr.GetClient()) - +func NewReconciler(ca resource.ClientApplicator, opts ...ReconcilerOption) *Reconciler { r := &Reconciler{ - mgr: mgr, - - client: resource.ClientApplicator{ - Client: kube, - Applicator: resource.NewAPIUpdatingApplicator(kube), - }, + client: ca, claim: definition{ - CRDRenderer: CRDRenderFn(xcrd.ForCompositeResourceClaim), - ControllerEngine: engine.New(mgr), - Finalizer: resource.NewAPIFinalizer(kube, finalizer), + CRDRenderer: CRDRenderFn(xcrd.ForCompositeResourceClaim), + Finalizer: resource.NewAPIFinalizer(ca, finalizer), }, + engine: &NopEngine{}, + log: logging.NewNopLogger(), record: event.NewNopRecorder(), @@ -223,17 +243,21 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { type definition struct { CRDRenderer - ControllerEngine resource.Finalizer } // A Reconciler reconciles CompositeResourceDefinitions. type Reconciler struct { - mgr manager.Manager + // This client should only be used by this XRD controller, not the claim + // controllers it manages. Claim controllers should use the engine's client. + // This ensures claim controllers will use a client backed by the same cache + // used to power their watches. client resource.ClientApplicator claim definition + engine ControllerEngine + log logging.Logger record event.Recorder @@ -294,11 +318,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // the (presumably exceedingly rare) latter case we'll orphan // the CRD. 
if !meta.WasCreated(crd) || !metav1.IsControlledBy(crd, d) { - // It's likely that we've already stopped this - // controller on a previous reconcile, but we try again - // just in case. This is a no-op if the controller was - // already stopped. - r.claim.Stop(claim.ControllerName(d.GetName())) + // It's likely that we've already stopped this controller on a + // previous reconcile, but we try again just in case. This is a + // no-op if the controller was already stopped. + if err := r.engine.Stop(ctx, claim.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonRedactXRC, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource claim controller") if err := r.claim.RemoveFinalizer(ctx, d); err != nil { @@ -349,9 +376,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, nil } - // The controller should be stopped before the deletion of CRD - // so that it doesn't crash. - r.claim.Stop(claim.ControllerName(d.GetName())) + // The controller should be stopped before the deletion of CRD so that + // it doesn't crash. + if err := r.engine.Stop(ctx, claim.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonRedactXRC, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource claim controller") if err := r.client.Delete(ctx, crd); resource.IgnoreNotFound(err) != nil { @@ -408,8 +439,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // upgrading fields that were previously managed using client-side apply. if r.options.Features.Enabled(features.EnableAlphaClaimSSA) { o = append(o, - claim.WithCompositeSyncer(claim.NewServerSideCompositeSyncer(r.client, names.NewNameGenerator(r.client))), - claim.WithManagedFieldsUpgrader(claim.NewPatchingManagedFieldsUpgrader(r.client)), + claim.WithCompositeSyncer(claim.NewServerSideCompositeSyncer(r.engine.GetClient(), names.NewNameGenerator(r.engine.GetClient()))), + claim.WithManagedFieldsUpgrader(claim.NewPatchingManagedFieldsUpgrader(r.engine.GetClient())), ) } @@ -418,50 +449,64 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // their default Connection Propagator. if r.options.Features.Enabled(features.EnableAlphaExternalSecretStores) { pc := claim.ConnectionPropagatorChain{ - claim.NewAPIConnectionPropagator(r.client), - connection.NewDetailsManager(r.client, secretsv1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), + claim.NewAPIConnectionPropagator(r.engine.GetClient()), + connection.NewDetailsManager(r.engine.GetClient(), secretsv1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), } o = append(o, claim.WithConnectionPropagator(pc), claim.WithConnectionUnpublisher( - claim.NewSecretStoreConnectionUnpublisher(connection.NewDetailsManager(r.client, + claim.NewSecretStoreConnectionUnpublisher(connection.NewDetailsManager(r.engine.GetClient(), secretsv1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig))))) } - cr := claim.NewReconciler(r.mgr, - resource.CompositeClaimKind(d.GetClaimGroupVersionKind()), - resource.CompositeKind(d.GetCompositeGroupVersionKind()), o...) 
- - ko := r.options.ForControllerRuntime() - ko.Reconciler = ratelimiter.NewReconciler(claim.ControllerName(d.GetName()), errors.WithSilentRequeueOnConflict(cr), r.options.GlobalRateLimiter) - - if err := r.claim.Err(claim.ControllerName(d.GetName())); err != nil { - log.Debug("Composite resource controller encountered an error", "error", err) - } - observed := d.Status.Controllers.CompositeResourceClaimTypeRef desired := v1.TypeReferenceTo(d.GetClaimGroupVersionKind()) if observed.APIVersion != "" && observed != desired { - r.claim.Stop(claim.ControllerName(d.GetName())) + if err := r.engine.Stop(ctx, claim.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonOfferXRC, err)) + return reconcile.Result{}, err + } log.Debug("Referenceable version changed; stopped composite resource claim controller", "observed-version", observed.APIVersion, "desired-version", desired.APIVersion) } + if r.engine.IsRunning(claim.ControllerName(d.GetName())) { + log.Debug("Composite resource claim controller is running") + d.Status.SetConditions(v1.WatchingClaim()) + return reconcile.Result{Requeue: false}, errors.Wrap(r.client.Status().Update(ctx, d), errUpdateStatus) + } + + cr := claim.NewReconciler(r.engine.GetClient(), + resource.CompositeClaimKind(d.GetClaimGroupVersionKind()), + resource.CompositeKind(d.GetCompositeGroupVersionKind()), o...) + + ko := r.options.ForControllerRuntime() + ko.Reconciler = ratelimiter.NewReconciler(claim.ControllerName(d.GetName()), errors.WithSilentRequeueOnConflict(cr), r.options.GlobalRateLimiter) + + if err := r.engine.Start(claim.ControllerName(d.GetName()), engine.WithRuntimeOptions(ko)); err != nil { + err = errors.Wrap(err, errStartController) + r.record.Event(d, event.Warning(reasonOfferXRC, err)) + return reconcile.Result{}, err + } + log.Debug("Started composite resource claim controller") + + // These must be *unstructured.Unstructured, not e.g. *claim.Unstructured. + // controller-runtime doesn't support watching types that satisfy the + // runtime.Unstructured interface - only *unstructured.Unstructured. 
cm := &kunstructured.Unstructured{} cm.SetGroupVersionKind(d.GetClaimGroupVersionKind()) + xr := &kunstructured.Unstructured{} + xr.SetGroupVersionKind(d.GetCompositeGroupVersionKind()) - cp := &kunstructured.Unstructured{} - cp.SetGroupVersionKind(d.GetCompositeGroupVersionKind()) - - if err := r.claim.Start(claim.ControllerName(d.GetName()), ko, - engine.WatchFor(cm, &handler.EnqueueRequestForObject{}), - engine.WatchFor(cp, &EnqueueRequestForClaim{}), + if err := r.engine.StartWatches(claim.ControllerName(d.GetName()), + engine.WatchFor(cm, engine.WatchTypeClaim, &handler.EnqueueRequestForObject{}), + engine.WatchFor(xr, engine.WatchTypeCompositeResource, &EnqueueRequestForClaim{}), ); err != nil { - err = errors.Wrap(err, errStartController) + err = errors.Wrap(err, errStartWatches) r.record.Event(d, event.Warning(reasonOfferXRC, err)) return reconcile.Result{}, err } - log.Debug("(Re)started composite resource claim controller") d.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReferenceTo(d.GetClaimGroupVersionKind()) d.Status.SetConditions(v1.WatchingClaim()) diff --git a/internal/controller/apiextensions/offered/reconciler_test.go b/internal/controller/apiextensions/offered/reconciler_test.go index 1be2ad2ad..366117324 100644 --- a/internal/controller/apiextensions/offered/reconciler_test.go +++ b/internal/controller/apiextensions/offered/reconciler_test.go @@ -29,40 +29,49 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" - "github.com/crossplane/crossplane/internal/controller/engine" + "github.com/crossplane/crossplane/internal/engine" ) -var _ ControllerEngine = &MockEngine{} - type MockEngine struct { - ControllerEngine - MockStart func(name string, o kcontroller.Options, w ...engine.Watch) error - MockStop func(name string) - MockErr func(name string) error + MockStart func(name string, o ...engine.ControllerOption) error + MockStop func(ctx context.Context, name string) error + MockIsRunning func(name string) bool + MockStartWatches func(name string, ws ...engine.Watch) error + MockGetClient func() client.Client +} + +var ( + _ ControllerEngine = &MockEngine{} + _ ControllerEngine = &NopEngine{} +) + +func (m *MockEngine) Start(name string, o ...engine.ControllerOption) error { + return m.MockStart(name, o...) } -func (m *MockEngine) Start(name string, o kcontroller.Options, w ...engine.Watch) error { - return m.MockStart(name, o, w...) +func (m *MockEngine) Stop(ctx context.Context, name string) error { + return m.MockStop(ctx, name) } -func (m *MockEngine) Stop(name string) { - m.MockStop(name) +func (m *MockEngine) IsRunning(name string) bool { + return m.MockIsRunning(name) } -func (m *MockEngine) Err(name string) error { - return m.MockErr(name) +func (m *MockEngine) StartWatches(name string, ws ...engine.Watch) error { + return m.MockStartWatches(name, ws...) 
+} + +func (m *MockEngine) GetClient() client.Client { + return m.MockGetClient() } func TestReconcile(t *testing.T) { @@ -73,7 +82,7 @@ func TestReconcile(t *testing.T) { ctrlr := true type args struct { - mgr manager.Manager + ca resource.ClientApplicator opts []ReconcilerOption } type want struct { @@ -89,13 +98,10 @@ func TestReconcile(t *testing.T) { "CompositeResourceDefinitionNotFound": { reason: "We should not return an error if the CompositeResourceDefinition was not found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), + }, }, }, want: want{ @@ -105,13 +111,10 @@ func TestReconcile(t *testing.T) { "GetCompositeResourceDefinitionError": { reason: "We should return any other error encountered while getting a CompositeResourceDefinition.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), + }, }, }, want: want{ @@ -121,13 +124,12 @@ func TestReconcile(t *testing.T) { "RenderCompositeResourceDefinitionError": { reason: "We should return any error we encounter while rendering a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return nil, errBoom })), @@ -140,18 +142,17 @@ func TestReconcile(t *testing.T) { "SetTerminatingConditionError": { reason: "We should return any error we encounter while setting the terminating status condition.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + d := o.(*v1.CompositeResourceDefinition) + d.SetDeletionTimestamp(&now) + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - d := o.(*v1.CompositeResourceDefinition) - d.SetDeletionTimestamp(&now) - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -164,24 +165,23 @@ func TestReconcile(t *testing.T) { "GetCustomResourceDefinitionError": { reason: "We should return any error we encounter while getting a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + return errBoom + } + 
return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - return errBoom - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -191,27 +191,60 @@ func TestReconcile(t *testing.T) { err: errors.Wrap(errBoom, errGetCRD), }, }, - "RemoveFinalizerError": { - reason: "We should return any error we encounter while removing a finalizer.", + "CustomResourceDefinitionNotFoundStopControllerError": { + reason: "We should return any error we encounter while stopping our controller (just in case) when the CRD doesn't exist.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{}, nil + })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { + return errBoom }, }), + }, + }, + want: want{ + err: errors.Wrap(errBoom, errStopController), + }, + }, + "RemoveFinalizerError": { + reason: "We should return any error we encounter while removing a finalizer.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), WithFinalizer(resource.FinalizerFns{RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }}), @@ -224,24 +257,26 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should not requeue when deleted if we successfully cleaned up our CRD and removed our finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d 
:= v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), WithFinalizer(resource.FinalizerFns{RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }}), @@ -254,29 +289,28 @@ func TestReconcile(t *testing.T) { "ListCustomResourcesError": { reason: "We should return any error we encounter while listing all defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -289,36 +323,35 @@ func TestReconcile(t *testing.T) { "DeleteCustomResourcesError": { reason: "We should return any error we encounter while deleting defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { + v := o.(*unstructured.UnstructuredList) + *v = unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{{}, 
{}}, + } + return nil + }), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { - v := o.(*unstructured.UnstructuredList) - *v = unstructured.UnstructuredList{ - Items: []unstructured.Unstructured{{}, {}}, - } - return nil - }), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -331,36 +364,35 @@ func TestReconcile(t *testing.T) { "SuccessfulDeleteCustomResources": { reason: "We should requeue to ensure our defined resources are gone before we remove our CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { + v := o.(*unstructured.UnstructuredList) + *v = unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{{}, {}}, + } + return nil + }), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { - v := o.(*unstructured.UnstructuredList) - *v = unstructured.UnstructuredList{ - Items: []unstructured.Unstructured{{}, {}}, - } - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -370,38 +402,80 @@ func TestReconcile(t *testing.T) { r: reconcile.Result{Requeue: true}, }, }, - "DeleteCustomResourceDefinitionError": { - reason: "We should return any error we encounter while deleting the CRD we 
created.", + "StopControllerError": { + reason: "We should return any error we encounter while stopping our controller.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{ - Spec: v1.CompositeResourceDefinitionSpec{}, - } - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{}, nil + })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { + return errBoom }, }), + }, + }, + want: want{ + err: errors.Wrap(errBoom, errStopController), + }, + }, + "DeleteCustomResourceDefinitionError": { + reason: "We should return any error we encounter while deleting the CRD we created.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{ + Spec: v1.CompositeResourceDefinitionSpec{}, + } + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), }, }, want: want{ @@ -411,44 +485,46 @@ func TestReconcile(t *testing.T) { "SuccessfulCleanup": { reason: "We should requeue to remove our finalizer once we've cleaned up our defined resources and CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - 
case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.SetUID(owner) - want.SetDeletionTimestamp(&now) - want.Status.SetConditions(v1.TerminatingClaim()) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.SetUID(owner) + want.SetDeletionTimestamp(&now) + want.Status.SetConditions(v1.TerminatingClaim()) - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("MockStatusUpdate: -want, +got:\n%s\n", diff) - } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("MockStatusUpdate: -want, +got:\n%s\n", diff) + } - return nil - }), - }, - }), + return nil + }), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), }, }, want: want{ @@ -458,13 +534,12 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should return any error we encounter while adding a finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -480,16 +555,15 @@ func TestReconcile(t *testing.T) { "ApplyCRDError": { reason: "We should return any error we encounter while applying our CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return errBoom - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return errBoom }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, 
error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -505,41 +579,105 @@ func TestReconcile(t *testing.T) { "CustomResourceDefinitionIsNotEstablished": { reason: "We should requeue if we're waiting for a newly created CRD to become established.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{}, nil + })), + WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { + return nil + }}), + }, + }, + want: want{ + r: reconcile.Result{Requeue: true}, + }, + }, + "VersionChangedStopControllerError": { + reason: "We should return any error we encounter while stopping our controller because the XRD's referencable version changed.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + xrd := &v1.CompositeResourceDefinition{ + Spec: v1.CompositeResourceDefinitionSpec{ + Group: "example.org", + ClaimNames: &extv1.CustomResourceDefinitionNames{ + Kind: "Claim", + }, + Versions: []v1.CompositeResourceDefinitionVersion{ + { + Name: "v2", + Referenceable: true, + }, + { + Name: "v1", + }, + }, + }, + Status: v1.CompositeResourceDefinitionStatus{ + Controllers: v1.CompositeResourceDefinitionControllerStatus{ + CompositeResourceClaimTypeRef: v1.TypeReference{ + APIVersion: "example.org/v1", + Kind: "Claim", + }, + }, + }, + } + + *obj.(*v1.CompositeResourceDefinition) = *xrd return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { - return &extv1.CustomResourceDefinition{}, nil + return &extv1.CustomResourceDefinition{ + Status: extv1.CustomResourceDefinitionStatus{ + Conditions: []extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, nil })), WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }}), + WithControllerEngine(&MockEngine{ + MockIsRunning: func(_ string) bool { return false }, + MockStop: func(_ context.Context, _ string) error { + return errBoom + }, + }), }, }, want: want{ - r: reconcile.Result{Requeue: true}, + r: reconcile.Result{}, + err: errors.Wrap(errBoom, errStopController), }, }, "StartControllerError": { reason: "We should return any error we encounter while starting our controller.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return nil - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), 
+ }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -553,8 +691,9 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return nil }, - MockStart: func(_ string, _ kcontroller.Options, _ ...engine.Watch) error { return errBoom }, + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return errBoom }, + MockGetClient: func() client.Client { return test.NewMockClient() }, }), }, }, @@ -562,28 +701,68 @@ func TestReconcile(t *testing.T) { err: errors.Wrap(errBoom, errStartController), }, }, + "StartWatchesError": { + reason: "We should return any error we encounter while starting watches.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), + }, + opts: []ReconcilerOption{ + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{ + Status: extv1.CustomResourceDefinitionStatus{ + Conditions: []extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, nil + })), + WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { + return nil + }}), + WithControllerEngine(&MockEngine{ + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { + return nil + }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { + return errBoom + }, + MockGetClient: func() client.Client { return test.NewMockClient() }, + }), + }, + }, + want: want{ + r: reconcile.Result{}, + err: errors.Wrap(errBoom, errStartWatches), + }, + }, "SuccessfulStart": { reason: "We should not requeue if we successfully ensured our CRD exists and controller is started.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Status.SetConditions(v1.WatchingClaim()) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Status.SetConditions(v1.WatchingClaim()) - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) 
(*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -597,8 +776,10 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return errBoom }, // This error should only be logged. - MockStart: func(_ string, _ kcontroller.Options, _ ...engine.Watch) error { return nil }, + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, + MockGetClient: func() client.Client { return test.NewMockClient() }, }, ), }, @@ -610,40 +791,85 @@ func TestReconcile(t *testing.T) { "SuccessfulUpdateControllerVersion": { reason: "We should not requeue if we successfully ensured our CRD exists, the old controller stopped, and the new one started.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + d := obj.(*v1.CompositeResourceDefinition) + d.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} + d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + d.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: "old"} + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} + want.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + want.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: "new"} + want.Status.SetConditions(v1.WatchingClaim()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } + return nil + }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - d := obj.(*v1.CompositeResourceDefinition) - d.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} - d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - d.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: "old"} - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} - want.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - want.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: "new"} - want.Status.SetConditions(v1.WatchingClaim()) + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{ + Status: extv1.CustomResourceDefinitionStatus{ + Conditions: []extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, nil + })), + 
WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { + return nil + }}), + WithControllerEngine(&MockEngine{ + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStop: func(_ context.Context, _ string) error { return nil }, + MockIsRunning: func(_ string) bool { return false }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, + MockGetClient: func() client.Client { return test.NewMockClient() }, + }), + }, + }, + want: want{ + r: reconcile.Result{Requeue: false}, + }, + }, + "NotRestartingWithoutVersionChange": { + reason: "We should return without requeueing if we successfully ensured our CRD exists and controller is started.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Status.SetConditions(v1.WatchingClaim()) - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -657,9 +883,11 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return nil }, - MockStart: func(_ string, _ kcontroller.Options, _ ...engine.Watch) error { return nil }, - MockStop: func(_ string) {}, + MockIsRunning: func(_ string) bool { return true }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { + t.Errorf("MockStart should not be called") + return nil + }, }), }, }, @@ -671,7 +899,7 @@ func TestReconcile(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, append(tc.args.opts, WithLogger(testLog))...) + r := NewReconciler(tc.args.ca, append(tc.args.opts, WithLogger(testLog))...) got, err := r.Reconcile(context.Background(), reconcile.Request{}) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { diff --git a/internal/controller/apiextensions/usage/reconciler.go b/internal/controller/apiextensions/usage/reconciler.go index 8aec1e898..58f50a2a5 100644 --- a/internal/controller/apiextensions/usage/reconciler.go +++ b/internal/controller/apiextensions/usage/reconciler.go @@ -171,6 +171,12 @@ type usageResource struct { // NewReconciler returns a Reconciler of Usages. func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { + // TODO(negz): Stop using this wrapper? It's only necessary if the client is + // backed by a cache, and at the time of writing the manager's client isn't. + // It's configured not to automatically cache unstructured objects. The + // wrapper is needed when caching because controller-runtime doesn't support + // caching types that satisfy runtime.Unstructured - it only supports the + // concrete *unstructured.Unstructured type. 
kube := unstructured.NewClient(mgr.GetClient()) r := &Reconciler{ diff --git a/internal/controller/engine/cache.go b/internal/controller/engine/cache.go deleted file mode 100644 index b8264b9e8..000000000 --- a/internal/controller/engine/cache.go +++ /dev/null @@ -1,305 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package engine - -import ( - "context" - "strings" - "sync" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - controllerruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - - "github.com/crossplane/crossplane-runtime/pkg/errors" -) - -// GVKRoutedCache is a cache that routes requests by GVK to other caches. -type GVKRoutedCache struct { - scheme *runtime.Scheme - - fallback cache.Cache - - lock sync.RWMutex - delegates map[schema.GroupVersionKind]cache.Cache -} - -// NewGVKRoutedCache returns a new routed cache. -func NewGVKRoutedCache(scheme *runtime.Scheme, fallback cache.Cache) *GVKRoutedCache { - return &GVKRoutedCache{ - scheme: scheme, - fallback: fallback, - delegates: make(map[schema.GroupVersionKind]cache.Cache), - } -} - -var _ cache.Cache = &GVKRoutedCache{} - -// AddDelegate adds a delegated cache for a given GVK. -func (c *GVKRoutedCache) AddDelegate(gvk schema.GroupVersionKind, delegate cache.Cache) { - c.lock.Lock() - defer c.lock.Unlock() - - c.delegates[gvk] = delegate -} - -// RemoveDelegate removes a delegated cache for a given GVK. -func (c *GVKRoutedCache) RemoveDelegate(gvk schema.GroupVersionKind) { - c.lock.Lock() - defer c.lock.Unlock() - - delete(c.delegates, gvk) -} - -// Get retrieves an object for a given ObjectKey backed by a cache. -func (c *GVKRoutedCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { - gvk, err := apiutil.GVKForObject(obj, c.scheme) - if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", obj, err) - } - - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.Get(ctx, key, obj, opts...) - } - - return c.fallback.Get(ctx, key, obj, opts...) -} - -// List lists objects for a given ObjectList backed by a cache. -func (c *GVKRoutedCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - gvk, err := apiutil.GVKForObject(list, c.scheme) - if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", list, err) - } - - if !strings.HasSuffix(gvk.Kind, "List") { - // following controller-runtime here which does not support non - // List types. - return errors.Errorf("non-list type %T (kind %q) passed as output", list, gvk) - } - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.List(ctx, list, opts...) - } - - return c.fallback.List(ctx, list, opts...) 
-} - -// GetInformer returns an informer for the given object. -func (c *GVKRoutedCache) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { - gvk, err := apiutil.GVKForObject(obj, c.scheme) - if err != nil { - return nil, errors.Errorf("failed to get GVK for type %T: %w", obj, err) - } - - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.GetInformer(ctx, obj, opts...) - } - - return c.fallback.GetInformer(ctx, obj, opts...) -} - -// GetInformerForKind returns an informer for the given GVK. -func (c *GVKRoutedCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) { - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.GetInformerForKind(ctx, gvk, opts...) - } - - return c.fallback.GetInformerForKind(ctx, gvk, opts...) -} - -// RemoveInformer removes an informer entry and stops it if it was running. -func (c *GVKRoutedCache) RemoveInformer(ctx context.Context, obj client.Object) error { - gvk, err := apiutil.GVKForObject(obj, c.scheme) - if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", obj, err) - } - - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.RemoveInformer(ctx, obj) - } - - return c.fallback.RemoveInformer(ctx, obj) -} - -// Start for a GVKRoutedCache is a no-op. Start must be called for each delegate. -func (c *GVKRoutedCache) Start(_ context.Context) error { - return nil -} - -// WaitForCacheSync for a GVKRoutedCache waits for all delegates and the -// fallback to sync, and returns false if any of them fails to sync. -func (c *GVKRoutedCache) WaitForCacheSync(ctx context.Context) bool { - c.lock.RLock() - syncedCh := make(chan bool, len(c.delegates)+1) - cas := make([]cache.Cache, 0, len(c.delegates)) - for _, ca := range c.delegates { - cas = append(cas, ca) - } - cas = append(cas, c.fallback) - c.lock.RUnlock() - - var wg sync.WaitGroup - ctx, cancelFn := context.WithCancel(ctx) - - for _, ca := range cas { - wg.Add(1) - go func(ca cache.Cache) { - defer wg.Done() - synced := ca.WaitForCacheSync(ctx) - if !synced { - // first unsynced cache breaks the whole wait - cancelFn() - } - syncedCh <- synced - }(ca) - } - - wg.Wait() - close(syncedCh) - cancelFn() - - // any not synced? - for synced := range syncedCh { - if !synced { - return false - } - } - - return c.fallback.WaitForCacheSync(ctx) -} - -// IndexField adds an index with the given field name on the given object type -// by using the given function to extract the value for that field. -func (c *GVKRoutedCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - gvk, err := apiutil.GVKForObject(obj, c.scheme) - if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", obj, err) - } - - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.IndexField(ctx, obj, field, extractValue) - } - - return c.fallback.IndexField(ctx, obj, field, extractValue) -} - -// cachedRoutedClient wraps a client and routes read requests by GVK to a cache. 
-type cachedRoutedClient struct { - client.Client - - scheme *runtime.Scheme - cache *GVKRoutedCache -} - -func (c *cachedRoutedClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { - gvk, err := apiutil.GVKForObject(obj, c.scheme) - if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", obj, err) - } - - c.cache.lock.RLock() - delegate, ok := c.cache.delegates[gvk] - c.cache.lock.RUnlock() - - if ok { - return delegate.Get(ctx, key, obj, opts...) - } - - return c.Client.Get(ctx, key, obj, opts...) -} - -func (c *cachedRoutedClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - gvk, err := apiutil.GVKForObject(list, c.scheme) - if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", list, err) - } - - if !strings.HasSuffix(gvk.Kind, "List") { - // following controller-runtime here which does not support non - // List types. - return errors.Errorf("non-list type %T (kind %q) passed as output", list, gvk) - } - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - - c.cache.lock.RLock() - delegate, ok := c.cache.delegates[gvk] - c.cache.lock.RUnlock() - - if ok { - return delegate.List(ctx, list, opts...) - } - - return c.Client.List(ctx, list, opts...) -} - -// WithGVKRoutedCache returns a manager backed by a GVKRoutedCache. The client -// returned by the manager will route read requests to cached GVKs. -func WithGVKRoutedCache(c *GVKRoutedCache, mgr controllerruntime.Manager) controllerruntime.Manager { - return &routedManager{ - Manager: mgr, - client: &cachedRoutedClient{ - Client: mgr.GetClient(), - scheme: mgr.GetScheme(), - cache: c, - }, - cache: c, - } -} - -type routedManager struct { - controllerruntime.Manager - - client client.Client - cache cache.Cache -} - -func (m *routedManager) GetClient() client.Client { - return m.client -} - -func (m *routedManager) GetCache() cache.Cache { - return m.cache -} diff --git a/internal/controller/engine/engine.go b/internal/controller/engine/engine.go deleted file mode 100644 index 552aeb1a4..000000000 --- a/internal/controller/engine/engine.go +++ /dev/null @@ -1,291 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package engine provides utilties for working with controllers. -package engine - -import ( - "context" - "sync" - - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/crossplane/crossplane-runtime/pkg/errors" -) - -// Error strings. 
-const ( - errCreateCache = "cannot create new cache" - errCreateController = "cannot create new controller" - errCrashCache = "cache error" - errCrashController = "controller error" - errWatch = "cannot setup watch" -) - -// A NewCacheFn creates a new controller-runtime cache. -type NewCacheFn func(cfg *rest.Config, o cache.Options) (cache.Cache, error) - -// A NewControllerFn creates a new controller-runtime controller. -type NewControllerFn func(name string, m manager.Manager, o controller.Options) (controller.Controller, error) - -// The default new cache and new controller functions. -// -//nolint:gochecknoglobals // We treat these as constants. -var ( - DefaultNewCacheFn NewCacheFn = cache.New - DefaultNewControllerFn NewControllerFn = controller.NewUnmanaged -) - -// An ControllerEngine manages the lifecycles of controller-runtime controllers -// (and their caches). The lifecycles of the controllers are not coupled to -// lifecycle of the engine, nor to the lifecycle of the controller manager it -// uses. -type ControllerEngine struct { - mgr manager.Manager - - started map[string]context.CancelFunc - errors map[string]error - mx sync.RWMutex - - newCache NewCacheFn - newCtrl NewControllerFn -} - -// An ControllerEngineOption configures a ControllerEngine. -type ControllerEngineOption func(*ControllerEngine) - -// WithNewCacheFn may be used to configure a different cache implementation. -// DefaultNewCacheFn is used by default. -func WithNewCacheFn(fn NewCacheFn) ControllerEngineOption { - return func(e *ControllerEngine) { - e.newCache = fn - } -} - -// WithNewControllerFn may be used to configure a different controller -// implementation. DefaultNewControllerFn is used by default. -func WithNewControllerFn(fn NewControllerFn) ControllerEngineOption { - return func(e *ControllerEngine) { - e.newCtrl = fn - } -} - -// New produces a new ControllerEngine. -func New(mgr manager.Manager, o ...ControllerEngineOption) *ControllerEngine { - e := &ControllerEngine{ - mgr: mgr, - - started: make(map[string]context.CancelFunc), - errors: make(map[string]error), - - newCache: DefaultNewCacheFn, - newCtrl: DefaultNewControllerFn, - } - - for _, eo := range o { - eo(e) - } - - return e -} - -// IsRunning indicates whether the named controller is running - i.e. whether it -// has been started and does not appear to have crashed. -func (e *ControllerEngine) IsRunning(name string) bool { - e.mx.RLock() - defer e.mx.RUnlock() - - _, running := e.started[name] - return running -} - -// Err returns any error encountered by the named controller. The returned error -// is always nil if the named controller is running. -func (e *ControllerEngine) Err(name string) error { - e.mx.RLock() - defer e.mx.RUnlock() - - return e.errors[name] -} - -// Stop the named controller. -func (e *ControllerEngine) Stop(name string) { - e.done(name, nil) -} - -func (e *ControllerEngine) done(name string, err error) { - e.mx.Lock() - defer e.mx.Unlock() - - stop, ok := e.started[name] - if ok { - stop() - delete(e.started, name) - } - - // Don't overwrite the first error if done is called multiple times. - if e.errors[name] != nil { - return - } - e.errors[name] = err -} - -// Watch an object. -type Watch struct { - // A watch is either a customSource, or a kind, handler, and predicates. - customSource source.Source - - kind client.Object - handler handler.EventHandler - predicates []predicate.Predicate -} - -// WatchFor returns a Watch for the supplied kind of object. 
Events will be -// handled by the supplied EventHandler, and may be filtered by the supplied -// predicates. -func WatchFor(kind client.Object, h handler.EventHandler, p ...predicate.Predicate) Watch { - return Watch{kind: kind, handler: h, predicates: p} -} - -// WatchTriggeredBy returns a custom watch for secondary resources triggering -// the controller. source.Kind can be used to create a source for a secondary -// cache. Events will be handled by the supplied EventHandler, and may be -// filtered by the supplied predicates. -func WatchTriggeredBy(source source.Source) Watch { - return Watch{customSource: source} -} - -// Start the named controller. Each controller is started with its own cache -// whose lifecycle is coupled to the controller. The controller is started with -// the supplied options, and configured with the supplied watches. Start does -// not block. -func (e *ControllerEngine) Start(name string, o controller.Options, w ...Watch) error { - c, err := e.Create(name, o, w...) - if err != nil { - return err - } - return c.Start(context.Background()) -} - -// NamedController is a controller that's not yet started. It gives access to -// the underlying cache, which may be used e.g. to add indexes. -type NamedController interface { - Start(ctx context.Context) error - GetCache() cache.Cache -} - -type namedController struct { - name string - e *ControllerEngine - ca cache.Cache - ctrl controller.Controller -} - -// Create the named controller. Each controller gets its own cache -// whose lifecycle is coupled to the controller. The controller is created with -// the supplied options, and configured with the supplied watches. It is not -// started yet. -func (e *ControllerEngine) Create(name string, o controller.Options, w ...Watch) (NamedController, error) { - // Each controller gets its own cache for the GVKs it owns. This cache is - // wrapped by a GVKRoutedCache that routes requests to other GVKs to the - // manager's cache. This way we can share informers for composed resources - // (that's where this is primarily used) with other controllers, but get - // control about the lifecycle of the owned GVKs' informers. - ca, err := e.newCache(e.mgr.GetConfig(), cache.Options{Scheme: e.mgr.GetScheme(), Mapper: e.mgr.GetRESTMapper()}) - if err != nil { - return nil, errors.Wrap(err, errCreateCache) - } - - // Wrap the existing manager to use our cache for the GVKs of this controller. - rc := NewGVKRoutedCache(e.mgr.GetScheme(), e.mgr.GetCache()) - rm := &routedManager{ - Manager: e.mgr, - client: &cachedRoutedClient{ - Client: e.mgr.GetClient(), - scheme: e.mgr.GetScheme(), - cache: rc, - }, - cache: rc, - } - - ctrl, err := e.newCtrl(name, rm, o) - if err != nil { - return nil, errors.Wrap(err, errCreateController) - } - - for _, wt := range w { - if wt.customSource != nil { - if err := ctrl.Watch(wt.customSource); err != nil { - return nil, errors.Wrap(err, errWatch) - } - continue - } - - // route cache and client (read) requests to our cache for this GVK. - gvk, err := apiutil.GVKForObject(wt.kind, e.mgr.GetScheme()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get GVK for type %T", wt.kind) - } - rc.AddDelegate(gvk, ca) - - if err := ctrl.Watch(source.Kind(ca, wt.kind, wt.handler, wt.predicates...)); err != nil { - return nil, errors.Wrap(err, errWatch) - } - } - - return &namedController{name: name, e: e, ca: ca, ctrl: ctrl}, nil -} - -// Start the named controller. Start does not block. 
-func (c *namedController) Start(ctx context.Context) error { - if c.e.IsRunning(c.name) { - return nil - } - - ctx, stop := context.WithCancel(ctx) - c.e.mx.Lock() - c.e.started[c.name] = stop - c.e.errors[c.name] = nil - c.e.mx.Unlock() - - go func() { - <-c.e.mgr.Elected() - c.e.done(c.name, errors.Wrap(c.ca.Start(ctx), errCrashCache)) - }() - go func() { - <-c.e.mgr.Elected() - if synced := c.ca.WaitForCacheSync(ctx); !synced { - c.e.done(c.name, errors.New(errCrashCache)) - return - } - c.e.done(c.name, errors.Wrap(c.ctrl.Start(ctx), errCrashController)) - }() - - return nil -} - -// GetCache returns the cache used by the named controller. -func (c *namedController) GetCache() cache.Cache { - return c.ca -} diff --git a/internal/controller/engine/engine_test.go b/internal/controller/engine/engine_test.go deleted file mode 100644 index 540ddee6e..000000000 --- a/internal/controller/engine/engine_test.go +++ /dev/null @@ -1,221 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package engine - -import ( - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" - "github.com/crossplane/crossplane-runtime/pkg/test" -) - -type MockCache struct { - cache.Cache - - MockStart func(stop context.Context) error -} - -func (c *MockCache) Start(stop context.Context) error { - return c.MockStart(stop) -} - -func (c *MockCache) WaitForCacheSync(_ context.Context) bool { - return true -} - -type MockController struct { - controller.Controller - - MockStart func(stop context.Context) error - MockWatch func(s source.Source) error -} - -func (c *MockController) Start(stop context.Context) error { - return c.MockStart(stop) -} - -func (c *MockController) Watch(s source.Source) error { - return c.MockWatch(s) -} - -func TestEngine(t *testing.T) { - errBoom := errors.New("boom") - - type args struct { - name string - o controller.Options - w []Watch - } - type want struct { - err error - crash error - } - cases := map[string]struct { - reason string - e *ControllerEngine - args args - want want - }{ - "NewCacheError": { - reason: "Errors creating a new cache should be returned", - e: New(&fake.Manager{}, - WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, errBoom }), - ), - args: args{ - name: "coolcontroller", - }, - want: want{ - err: errors.Wrap(errBoom, errCreateCache), - }, - }, - "NewControllerError": { - reason: "Errors creating a new controller should be returned", - e: New( - &fake.Manager{ - Scheme: runtime.NewScheme(), - Cache: &MockCache{}, - }, - WithNewCacheFn(func(*rest.Config, cache.Options) 
(cache.Cache, error) { return nil, nil }), - WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { return nil, errBoom }), - ), - args: args{ - name: "coolcontroller", - }, - want: want{ - err: errors.Wrap(errBoom, errCreateController), - }, - }, - "WatchError": { - reason: "Errors adding a watch should be returned", - e: New( - &fake.Manager{ - Scheme: runtime.NewScheme(), - Cache: &MockCache{}, - }, - WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }), - WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { - c := &MockController{MockWatch: func(source.Source) error { return errBoom }} - return c, nil - }), - ), - args: args{ - name: "coolcontroller", - w: []Watch{WatchFor(&unstructured.Unstructured{ - Object: map[string]interface{}{"apiVersion": "example.org/v1", "kind": "Thing"}, - }, nil)}, - }, - want: want{ - err: errors.Wrap(errBoom, errWatch), - }, - }, - "SchemeError": { - reason: "Passing an object of unknown GVK", - e: New( - &fake.Manager{ - Scheme: runtime.NewScheme(), - Cache: &MockCache{}, - }, - WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }), - WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { - c := &MockController{MockWatch: func(source.Source) error { return errBoom }} - return c, nil - }), - ), - args: args{ - name: "coolcontroller", - w: []Watch{WatchFor(&unstructured.Unstructured{}, nil)}, - }, - want: want{ - err: errors.Wrap(runtime.NewMissingKindErr("unstructured object has no kind"), "failed to get GVK for type *unstructured.Unstructured"), - }, - }, - "CacheCrashError": { - reason: "Errors starting or running a cache should be returned", - e: New(&fake.Manager{}, - WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { - c := &MockCache{MockStart: func(_ context.Context) error { return errBoom }} - return c, nil - }), - WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { - c := &MockController{MockStart: func(_ context.Context) error { - return nil - }} - return c, nil - }), - ), - args: args{ - name: "coolcontroller", - }, - want: want{ - crash: errors.Wrap(errBoom, errCrashCache), - }, - }, - "ControllerCrashError": { - reason: "Errors starting or running a controller should be returned", - e: New(&fake.Manager{}, - WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { - c := &MockCache{MockStart: func(_ context.Context) error { - return nil - }} - return c, nil - }), - WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { - c := &MockController{MockStart: func(_ context.Context) error { - return errBoom - }} - return c, nil - }), - ), - args: args{ - name: "coolcontroller", - }, - want: want{ - crash: errors.Wrap(errBoom, errCrashController), - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - err := tc.e.Start(tc.args.name, tc.args.o, tc.args.w...) - if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { - t.Errorf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) - } - - // Give the goroutines a little time to return an error. If this - // becomes flaky or time consuming we could use a ticker instead. 
- time.Sleep(100 * time.Millisecond) - - tc.e.Stop(tc.args.name) - if diff := cmp.Diff(tc.want.crash, tc.e.Err(tc.args.name), test.EquateErrors()); diff != "" { - t.Errorf("\n%s\ne.Err(...): -want error, +got error:\n%s", tc.reason, diff) - } - }) - } -} diff --git a/internal/engine/cache.go b/internal/engine/cache.go new file mode 100644 index 000000000..c1ccca089 --- /dev/null +++ b/internal/engine/cache.go @@ -0,0 +1,189 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + "github.com/crossplane/crossplane-runtime/pkg/errors" +) + +var ( + _ cache.Cache = &InformerTrackingCache{} + _ TrackingInformers = &InformerTrackingCache{} +) + +// An InformerTrackingCache wraps a cache.Cache and keeps track of what GVKs it +// has started informers for. It takes a blocking lock whenever a new informer +// is started or stopped, but so does the standard controller-runtime Cache +// implementation. +type InformerTrackingCache struct { + // The wrapped cache. + cache.Cache + + scheme *runtime.Scheme + + mx sync.RWMutex + active map[schema.GroupVersionKind]bool +} + +// TrackInformers wraps the supplied cache, adding a method to query which +// informers are active. +func TrackInformers(c cache.Cache, s *runtime.Scheme) *InformerTrackingCache { + return &InformerTrackingCache{ + Cache: c, + scheme: s, + active: make(map[schema.GroupVersionKind]bool), + } +} + +// ActiveInformers returns the GVKs of the informers believed to currently be +// active. The InformerTrackingCache considers an informer to become active when +// a caller calls Get, List, or one of the GetInformer methods. It considers an +// informer to become inactive when a caller calls the RemoveInformer method. +func (c *InformerTrackingCache) ActiveInformers() []schema.GroupVersionKind { + c.mx.RLock() + defer c.mx.RUnlock() + + out := make([]schema.GroupVersionKind, 0, len(c.active)) + for gvk := range c.active { + out = append(out, gvk) + } + return out +} + +// Get retrieves an obj for the given object key from the Kubernetes Cluster. +// obj must be a struct pointer so that obj can be updated with the response +// returned by the Server. +// +// Getting an object marks the informer for the object's GVK active. +func (c *InformerTrackingCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return errors.Wrap(err, "cannot determine group, version, and kind of supplied object") + } + + c.mx.RLock() + if _, active := c.active[gvk]; active { + defer c.mx.RUnlock() + return c.Cache.Get(ctx, key, obj, opts...) 
+ } + c.mx.RUnlock() + + c.mx.Lock() + defer c.mx.Unlock() + c.active[gvk] = true + return c.Cache.Get(ctx, key, obj, opts...) +} + +// List retrieves list of objects for a given namespace and list options. On a +// successful call, Items field in the list will be populated with the result +// returned from the server. +// +// Listing objects marks the informer for the object's GVK active. +func (c *InformerTrackingCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + gvk, err := apiutil.GVKForObject(list, c.scheme) + if err != nil { + return errors.Wrap(err, "cannot determine group, version, and kind of supplied object") + } + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + c.mx.RLock() + if _, active := c.active[gvk]; active { + defer c.mx.RUnlock() + return c.Cache.List(ctx, list, opts...) + } + c.mx.RUnlock() + + c.mx.Lock() + defer c.mx.Unlock() + c.active[gvk] = true + return c.Cache.List(ctx, list, opts...) +} + +// GetInformer fetches or constructs an informer for the given object that +// corresponds to a single API kind and resource. +// +// Getting an informer for an object marks the informer as active. +func (c *InformerTrackingCache) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, errors.Wrap(err, "cannot determine group, version, and kind of supplied object") + } + + c.mx.RLock() + if _, active := c.active[gvk]; active { + defer c.mx.RUnlock() + return c.Cache.GetInformer(ctx, obj, opts...) + } + c.mx.RUnlock() + + c.mx.Lock() + defer c.mx.Unlock() + c.active[gvk] = true + return c.Cache.GetInformer(ctx, obj, opts...) +} + +// GetInformerForKind is similar to GetInformer, except that it takes a +// group-version-kind, instead of the underlying object. +// +// Getting an informer marks the informer as active. +func (c *InformerTrackingCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) { + c.mx.RLock() + if _, active := c.active[gvk]; active { + defer c.mx.RUnlock() + return c.Cache.GetInformerForKind(ctx, gvk, opts...) + } + c.mx.RUnlock() + + c.mx.Lock() + defer c.mx.Unlock() + c.active[gvk] = true + return c.Cache.GetInformerForKind(ctx, gvk, opts...) +} + +// RemoveInformer removes an informer entry and stops it if it was running. +// +// Removing an informer marks the informer as inactive. +func (c *InformerTrackingCache) RemoveInformer(ctx context.Context, obj client.Object) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return errors.Wrap(err, "cannot determine group, version, and kind of supplied object") + } + + c.mx.RLock() + if _, active := c.active[gvk]; !active { + // This should only happen if RemoveInformer is called for an informer + // that was never started. + defer c.mx.RUnlock() + return c.Cache.RemoveInformer(ctx, obj) + } + c.mx.RUnlock() + + c.mx.Lock() + defer c.mx.Unlock() + delete(c.active, gvk) + return c.Cache.RemoveInformer(ctx, obj) +} diff --git a/internal/engine/cache_test.go b/internal/engine/cache_test.go new file mode 100644 index 000000000..8fac54e60 --- /dev/null +++ b/internal/engine/cache_test.go @@ -0,0 +1,166 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ cache.Cache = &MockCache{} + +type MockCache struct { + cache.Cache + + MockGet func(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error + MockList func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error + MockGetInformer func(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) + MockGetInformerForKind func(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) + MockRemoveInformer func(ctx context.Context, obj client.Object) error +} + +func (m *MockCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + return m.MockGet(ctx, key, obj, opts...) +} + +func (m *MockCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + return m.MockList(ctx, list, opts...) +} + +func (m *MockCache) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { + return m.MockGetInformer(ctx, obj, opts...) +} + +func (m *MockCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) { + return m.MockGetInformerForKind(ctx, gvk, opts...) +} + +func (m *MockCache) RemoveInformer(ctx context.Context, obj client.Object) error { + return m.MockRemoveInformer(ctx, obj) +} + +func TestActiveInformers(t *testing.T) { + c := &MockCache{ + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object, _ ...client.GetOption) error { + return nil + }, + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { + return nil + }, + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return nil, nil + }, + MockGetInformerForKind: func(_ context.Context, _ schema.GroupVersionKind, _ ...cache.InformerGetOption) (cache.Informer, error) { + return nil, nil + }, + MockRemoveInformer: func(_ context.Context, _ client.Object) error { return nil }, + } + + itc := TrackInformers(c, runtime.NewScheme()) + + ctx := context.Background() + + // We intentionally call methods twice to cover the code paths where we + // don't start tracking an informer because we already track it (and vice + // versa for remove). 
+ + // Get a GVK + get := &unstructured.Unstructured{} + get.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Get", + }) + _ = itc.Get(ctx, client.ObjectKeyFromObject(get), get) + _ = itc.Get(ctx, client.ObjectKeyFromObject(get), get) + + // List a GVK + list := &unstructured.UnstructuredList{} + list.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "ListList", // It's a list list! + }) + _ = itc.List(ctx, list) + _ = itc.List(ctx, list) + + // Get an informer + getinf := &unstructured.Unstructured{} + getinf.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "GetInformer", + }) + _, _ = itc.GetInformer(ctx, getinf) + _, _ = itc.GetInformer(ctx, getinf) + + // Get an informer by GVK + getgvk := schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "GetInformerForKind", + } + _, _ = itc.GetInformerForKind(ctx, getgvk) + _, _ = itc.GetInformerForKind(ctx, getgvk) + + // Get a GVK, then remove its informer. + remove := &unstructured.Unstructured{} + remove.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "RemoveMe", + }) + _ = itc.Get(ctx, client.ObjectKeyFromObject(remove), remove) + _ = itc.RemoveInformer(ctx, remove) + _ = itc.RemoveInformer(ctx, remove) + + want := []schema.GroupVersionKind{ + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "Get", + }, + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "List", + }, + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "GetInformer", + }, + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "GetInformerForKind", + }, + } + + got := itc.ActiveInformers() + if diff := cmp.Diff(want, got, cmpopts.SortSlices(func(a, b schema.GroupVersionKind) bool { return a.String() > b.String() })); diff != "" { + t.Errorf("\nitc.ActiveInformers(...): -want, +got:\n%s", diff) + } +} diff --git a/internal/engine/engine.go b/internal/engine/engine.go new file mode 100644 index 000000000..f7688ab2a --- /dev/null +++ b/internal/engine/engine.go @@ -0,0 +1,539 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package engine manages the lifecycle of a set of controllers. 
+package engine + +import ( + "context" + "sync" + "time" + + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + kcache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/logging" +) + +// A ControllerEngine manages a set of controllers that can be dynamically +// started and stopped. It also manages a dynamic set of watches per controller, +// and the informers that back them. +type ControllerEngine struct { + // The manager of this engine's controllers. Controllers managed by the + // engine use the engine's client and cache, not the manager's. + mgr manager.Manager + + // The engine must have exclusive use of these informers. All controllers + // managed by the engine should use these informers. + infs TrackingInformers + + // The client used by the engine's controllers. The client must be backed by + // the above TrackingInformers. + client client.Client + + log logging.Logger + + // Protects everything below. + mx sync.RWMutex + + // Running controllers, by name. + controllers map[string]*controller +} + +// TrackingInformers is a set of Informers. It tracks which are active. +type TrackingInformers interface { + cache.Informers + ActiveInformers() []schema.GroupVersionKind +} + +// New creates a new controller engine. +func New(mgr manager.Manager, infs TrackingInformers, c client.Client, o ...ControllerEngineOption) *ControllerEngine { + e := &ControllerEngine{ + mgr: mgr, + infs: infs, + client: c, + log: logging.NewNopLogger(), + controllers: make(map[string]*controller), + } + + for _, fn := range o { + fn(e) + } + + return e +} + +// An ControllerEngineOption configures a controller engine. +type ControllerEngineOption func(*ControllerEngine) + +// WithLogger configures an Engine to use a logger. +func WithLogger(l logging.Logger) ControllerEngineOption { + return func(e *ControllerEngine) { + e.log = l + } +} + +type controller struct { + // The running controller. + ctrl kcontroller.Controller + + // Called to stop the controller. + cancel context.CancelFunc + + // Protects the below map. + mx sync.RWMutex + + // The controller's sources, by watched GVK. + sources map[WatchID]*StoppableSource +} + +// A WatchGarbageCollector periodically garbage collects watches. +type WatchGarbageCollector interface { + GarbageCollectWatches(ctx context.Context, interval time.Duration) +} + +// A NewControllerFn can start a new controller-runtime controller. +type NewControllerFn func(name string, mgr manager.Manager, options kcontroller.Options) (kcontroller.Controller, error) + +// ControllerOptions configure a controller. +type ControllerOptions struct { + runtime kcontroller.Options + nc NewControllerFn + gc WatchGarbageCollector +} + +// A ControllerOption configures a controller. +type ControllerOption func(o *ControllerOptions) + +// WithRuntimeOptions configures the underlying controller-runtime controller. 
+func WithRuntimeOptions(ko kcontroller.Options) ControllerOption { + return func(o *ControllerOptions) { + o.runtime = ko + } +} + +// WithWatchGarbageCollector specifies an optional garbage collector this +// controller should use to remove unused watches. +func WithWatchGarbageCollector(gc WatchGarbageCollector) ControllerOption { + return func(o *ControllerOptions) { + o.gc = gc + } +} + +// WithNewControllerFn configures how the engine starts a new controller-runtime +// controller. +func WithNewControllerFn(fn NewControllerFn) ControllerOption { + return func(o *ControllerOptions) { + o.nc = fn + } +} + +// GetClient gets a client backed by the controller engine's cache. +func (e *ControllerEngine) GetClient() client.Client { + return e.client +} + +// GetFieldIndexer returns a FieldIndexer that can be used to add indexes to the +// controller engine's cache. +func (e *ControllerEngine) GetFieldIndexer() client.FieldIndexer { + return e.infs +} + +// Start a new controller. +func (e *ControllerEngine) Start(name string, o ...ControllerOption) error { + e.mx.Lock() + defer e.mx.Unlock() + + // Start is a no-op if the controller is already running. + if _, running := e.controllers[name]; running { + return nil + } + + co := &ControllerOptions{nc: kcontroller.NewUnmanaged} + for _, fn := range o { + fn(co) + } + + c, err := co.nc(name, e.mgr, co.runtime) + if err != nil { + return errors.Wrap(err, "cannot create new controller") + } + + // The caller will usually be a reconcile method. We want the controller + // to keep running when the reconcile ends, so we create a new context + // instead of taking one as an argument. + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + // Don't start the controller until the manager is elected. + <-e.mgr.Elected() + + e.log.Debug("Starting new controller", "controller", name) + + // Run the controller until its context is cancelled. + if err := c.Start(ctx); err != nil { + e.log.Info("Controller stopped with an error", "name", name, "error", err) + + // Make a best effort attempt to cleanup the controller so that + // IsRunning will return false. + _ = e.Stop(ctx, name) + return + } + + e.log.Debug("Stopped controller", "controller", name) + }() + + if co.gc != nil { + go func() { + // Don't start the garbage collector until the manager is elected. + <-e.mgr.Elected() + + e.log.Debug("Starting watch garbage collector for controller", "controller", name) + + // Run the collector every minute until its context is cancelled. + co.gc.GarbageCollectWatches(ctx, 1*time.Minute) + + e.log.Debug("Stopped watch garbage collector for controller", "controller", name) + }() + } + + r := &controller{ + ctrl: c, + cancel: cancel, + sources: make(map[WatchID]*StoppableSource), + } + + e.controllers[name] = r + + return nil +} + +// Stop a controller. +func (e *ControllerEngine) Stop(ctx context.Context, name string) error { + e.mx.Lock() + defer e.mx.Unlock() + + c, running := e.controllers[name] + + // Stop is a no-op if the controller isn't running. + if !running { + return nil + } + + c.mx.Lock() + defer c.mx.Unlock() + + // Stop the controller's watches. + for wid, w := range c.sources { + if err := w.Stop(ctx); err != nil { + c.mx.Unlock() + return errors.Wrapf(err, "cannot stop %q watch for %q", wid.Type, wid.GVK) + } + delete(c.sources, wid) + e.log.Debug("Stopped watching GVK", "controller", name, "watch-type", wid.Type, "watched-gvk", wid.GVK) + } + + // Stop and delete the controller. 
+ c.cancel() + delete(e.controllers, name) + + e.log.Debug("Stopped controller", "controller", name) + return nil +} + +// IsRunning returns true if the named controller is running. +func (e *ControllerEngine) IsRunning(name string) bool { + e.mx.RLock() + defer e.mx.RUnlock() + _, running := e.controllers[name] + return running +} + +// A WatchType uniquely identifies a "type" of watch - i.e. a handler and a set +// of predicates. The controller engine uniquely identifies a Watch by its +// (kind, watch type) tuple. The engine will only start one watch of each (kind, +// watch type) tuple. To watch the same kind of resource multiple times, use +// different watch types. +type WatchType string + +// Common watch types. +const ( + WatchTypeClaim WatchType = "Claim" + WatchTypeCompositeResource WatchType = "CompositeResource" + WatchTypeComposedResource WatchType = "ComposedResource" + WatchTypeCompositionRevision WatchType = "CompositionRevision" +) + +// Watch an object. +type Watch struct { + wt WatchType + kind client.Object + handler handler.EventHandler + predicates []predicate.Predicate +} + +// A WatchID uniquely identifies a watch. +type WatchID struct { + Type WatchType + GVK schema.GroupVersionKind +} + +// WatchFor returns a Watch for the supplied kind of object. Events will be +// handled by the supplied EventHandler, and may be filtered by the supplied +// predicates. +func WatchFor(kind client.Object, wt WatchType, h handler.EventHandler, p ...predicate.Predicate) Watch { + return Watch{kind: kind, wt: wt, handler: h, predicates: p} +} + +// StartWatches instructs the named controller to start the supplied watches. +// The controller will only start a watch if it's not already watching the type +// of object specified by the supplied Watch. StartWatches blocks other +// operations on the same controller if and when it starts a watch. +func (e *ControllerEngine) StartWatches(name string, ws ...Watch) error { + e.mx.RLock() + c, running := e.controllers[name] + e.mx.RUnlock() + + if !running { + return errors.Errorf("controller %q is not running", name) + } + + // Make sure we can get GVKs for all the watches before we take locks. + gvks := make([]schema.GroupVersionKind, len(ws)) + for i := range ws { + gvk, err := apiutil.GVKForObject(ws[i].kind, e.mgr.GetScheme()) + if err != nil { + return errors.Wrapf(err, "cannot determine group, version, and kind for %T", ws[i].kind) + } + gvks[i] = gvk + } + + // It's possible that we didn't explicitly stop a watch, but its backing + // informer was removed. This implicitly stops the watch by deleting its + // backing listener. If a watch exists but doesn't have an active informer, + // we want to restart the watch (and, implicitly, the informer). + // + // There's a potential race here. Another Goroutine could remove an informer + // between where we build the map and where we read it to check whether an + // informer is active. We wouldn't start a watch when we should. If the + // controller calls StartWatches repeatedly (e.g. an XR controller) this + // will eventually self-correct. + a := e.infs.ActiveInformers() + activeInformer := make(map[schema.GroupVersionKind]bool, len(a)) + for _, gvk := range a { + activeInformer[gvk] = true + } + + // Some controllers will call StartWatches on every reconcile. Most calls + // won't actually need to start a new watch. For example an XR controller + // would only need to start a new watch if an XR composed a new kind of + // resource that no other XR it controls already composes. 
So, we try to + // avoid taking a write lock and blocking all reconciles unless we need to. + c.mx.RLock() + start := false + for i, w := range ws { + wid := WatchID{Type: w.wt, GVK: gvks[i]} + // We've already created this watch and the informer backing it is still + // running. We don't need to create a new watch. + if _, watchExists := c.sources[wid]; watchExists && activeInformer[wid.GVK] { + e.log.Debug("Watch exists for GVK, not starting a new one", "controller", name, "watch-type", wid.Type, "watched-gvk", wid.GVK) + continue + } + // There's at least one watch to start. + start = true + break + } + c.mx.RUnlock() + + // Nothing to start. + if !start { + return nil + } + + // We have at least one watch to start - take the write lock. It's possible + // another Goroutine updated this controller's watches since we released the + // read lock, so we compute everything again. + c.mx.Lock() + defer c.mx.Unlock() + + // Start new sources. + for i, w := range ws { + wid := WatchID{Type: w.wt, GVK: gvks[i]} + + // We've already created this watch and the informer backing it is still + // running. We don't need to create a new watch. We don't debug log this + // one - we'll have logged it above unless the watch was added between + // releasing the read lock and taking the write lock. + if _, watchExists := c.sources[wid]; watchExists && activeInformer[wid.GVK] { + continue + } + + // The controller's Watch method just calls the StoppableSource's Start + // method, passing in its private work queue as an argument. This will + // start an informer for the watched kind if there isn't one running + // already. + // + // The watch will stop sending events when either the source is stopped, + // or its backing informer is stopped. The controller's work queue will + // stop processing events when the controller is stopped. + src := NewStoppableSource(e.infs, w.kind, w.handler, w.predicates...) + if err := c.ctrl.Watch(src); err != nil { + return errors.Wrapf(err, "cannot start %q watch for %q", wid.Type, wid.GVK) + } + + // Record that we're now running this source. + c.sources[wid] = src + + e.log.Debug("Started watching GVK", "controller", name, "watch-type", wid.Type, "watched-gvk", wid.GVK) + } + + return nil +} + +// GetWatches returns the active watches for the supplied controller. +func (e *ControllerEngine) GetWatches(name string) ([]WatchID, error) { + e.mx.RLock() + c, running := e.controllers[name] + e.mx.RUnlock() + + if !running { + return nil, errors.Errorf("controller %q is not running", name) + } + + c.mx.RLock() + defer c.mx.RUnlock() + + out := make([]WatchID, 0, len(c.sources)) + for wid := range c.sources { + out = append(out, wid) + } + return out, nil +} + +// StopWatches stops the supplied watches. StopWatches blocks other operations +// on the same controller if and when it stops a watch. It returns the number of +// watches that it successfully stopped. +func (e *ControllerEngine) StopWatches(ctx context.Context, name string, ws ...WatchID) (int, error) { + e.mx.RLock() + c, running := e.controllers[name] + e.mx.RUnlock() + + if !running { + return 0, errors.Errorf("controller %q is not running", name) + } + + // Don't take the write lock if we want to keep all watches. + c.mx.RLock() + stop := false + for _, wid := range ws { + if _, watchExists := c.sources[wid]; watchExists { + stop = true + break + } + } + c.mx.RUnlock() + + if !stop { + return 0, nil + } + + // We have at least one watch to stop - take the write lock. 
It's possible + // another Goroutine updated this controller's watches since we released the + // read lock, so we compute everything again. + c.mx.Lock() + defer c.mx.Unlock() + + stopped := 0 + for _, wid := range ws { + w, watchExists := c.sources[wid] + if !watchExists { + continue + } + if err := w.Stop(ctx); err != nil { + return stopped, errors.Wrapf(err, "cannot stop %q watch for %q", wid.Type, wid.GVK) + } + delete(c.sources, wid) + e.log.Debug("Stopped watching GVK", "controller", name, "watch-type", wid.Type, "watched-gvk", wid.GVK) + stopped++ + } + + return stopped, nil +} + +// GarbageCollectCustomResourceInformers garbage collects informers for custom +// resources (e.g. Crossplane XRs, claims and composed resources) when the CRD +// that defines them is deleted. The garbage collector runs until the supplied +// context is cancelled. +func (e *ControllerEngine) GarbageCollectCustomResourceInformers(ctx context.Context) error { + i, err := e.infs.GetInformer(ctx, &extv1.CustomResourceDefinition{}) + if err != nil { + return errors.Wrap(err, "cannot get informer for CustomResourceDefinitions") + } + + h, err := i.AddEventHandler(kcache.ResourceEventHandlerFuncs{ + DeleteFunc: func(obj interface{}) { + o := obj + if fsu, ok := obj.(kcache.DeletedFinalStateUnknown); ok { + o = fsu.Obj + } + crd, ok := o.(*extv1.CustomResourceDefinition) + if !ok { + // This should never happen. + return + } + + for _, v := range crd.Spec.Versions { + gvk := schema.GroupVersionKind{ + Group: crd.Spec.Group, + Kind: crd.Spec.Names.Kind, + Version: v.Name, + } + + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(gvk) + + if err := e.infs.RemoveInformer(ctx, u); err != nil { + e.log.Info("Cannot remove informer for type defined by deleted CustomResourceDefinition", "crd", crd.GetName(), "gvk", gvk) + continue + } + + e.log.Debug("Removed informer for type defined by deleted CustomResourceDefinition", "crd", crd.GetName(), "gvk", gvk) + } + }, + }) + if err != nil { + return errors.Wrap(err, "cannot add garbage collector event handler to CustomResourceDefinition informer") + } + + go func() { + <-ctx.Done() + if err := i.RemoveEventHandler(h); err != nil { + e.log.Info("Cannot remove garbage collector event handler from CustomResourceDefinition informer") + } + }() + + return nil +} diff --git a/internal/engine/engine_test.go b/internal/engine/engine_test.go new file mode 100644 index 000000000..62ca8b463 --- /dev/null +++ b/internal/engine/engine_test.go @@ -0,0 +1,897 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var _ TrackingInformers = &MockTrackingInformers{} + +type MockTrackingInformers struct { + cache.Informers + + MockActiveInformers func() []schema.GroupVersionKind + MockGetInformer func(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) + MockRemoveInformer func(ctx context.Context, obj client.Object) error +} + +func (m *MockTrackingInformers) ActiveInformers() []schema.GroupVersionKind { + return m.MockActiveInformers() +} + +func (m *MockTrackingInformers) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { + return m.MockGetInformer(ctx, obj, opts...) +} + +func (m *MockTrackingInformers) RemoveInformer(ctx context.Context, obj client.Object) error { + return m.MockRemoveInformer(ctx, obj) +} + +var _ manager.Manager = &MockManager{} + +type MockManager struct { + manager.Manager + + MockElected func() <-chan struct{} + MockGetScheme func() *runtime.Scheme +} + +func (m *MockManager) Elected() <-chan struct{} { + return m.MockElected() +} + +func (m *MockManager) GetScheme() *runtime.Scheme { + return m.MockGetScheme() +} + +var _ WatchGarbageCollector = &MockWatchGarbageCollector{} + +type MockWatchGarbageCollector struct { + MockGarbageCollectWatches func(ctx context.Context, interval time.Duration) +} + +func (m *MockWatchGarbageCollector) GarbageCollectWatches(ctx context.Context, interval time.Duration) { + m.MockGarbageCollectWatches(ctx, interval) +} + +var _ kcontroller.Controller = &MockController{} + +type MockController struct { + kcontroller.Controller + + MockStart func(ctx context.Context) error + MockWatch func(src source.Source) error +} + +func (m *MockController) Start(ctx context.Context) error { + return m.MockStart(ctx) +} + +func (m *MockController) Watch(src source.Source) error { + return m.MockWatch(src) +} + +func TestStartController(t *testing.T) { + type params struct { + mgr manager.Manager + infs TrackingInformers + c client.Client + opts []ControllerEngineOption + } + type args struct { + name string + opts []ControllerOption + } + type want struct { + err error + } + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "NewControllerError": { + reason: "Start should return an error if it can't create a new controller", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + }, + infs: &MockTrackingInformers{}, + }, + args: args{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return nil, errors.New("boom") + }), + }, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "StartControllerError": { + reason: "Start won't return an error if it can't start the new controller in a goroutine.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := 
make(chan struct{}) + close(e) + return e + }, + }, + infs: &MockTrackingInformers{}, + }, + args: args{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(_ context.Context) error { + return errors.New("boom") + }, + }, nil + }), + }, + }, + // TODO(negz): Test that the error was logged? We usually don't. + want: want{ + err: nil, + }, + }, + "SuccessfulStart": { + reason: "It should be possible to successfully start a controller.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + }, + infs: &MockTrackingInformers{}, + }, + args: args{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + }, nil + }), + WithWatchGarbageCollector(&MockWatchGarbageCollector{ + MockGarbageCollectWatches: func(ctx context.Context, _ time.Duration) { + <-ctx.Done() + }, + }), + }, + }, + want: want{ + err: nil, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + e := New(tc.params.mgr, tc.params.infs, tc.params.c, tc.params.opts...) + err := e.Start(tc.args.name, tc.args.opts...) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Starting the controller second time should be a no-op. + err = e.Start(tc.args.name, tc.args.opts...) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Stop the controller. Will be a no-op if it never started. + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + err = e.Stop(ctx, tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Stop(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} + +func TestIsRunning(t *testing.T) { + type params struct { + mgr manager.Manager + infs TrackingInformers + c client.Client + opts []ControllerEngineOption + } + + // We need to control how we start the controller. 
+ type argsStart struct { + name string + opts []ControllerOption + } + type args struct { + name string + } + type want struct { + running bool + } + cases := map[string]struct { + reason string + params params + argsStart argsStart + args args + want want + }{ + "SuccessfulStart": { + reason: "IsRunning should return true if the controller successfully starts.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + }, + infs: &MockTrackingInformers{}, + }, + argsStart: argsStart{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + }, nil + }), + }, + }, + args: args{ + name: "cool-controller", + }, + want: want{ + running: true, + }, + }, + "StartControllerError": { + reason: "IsRunning should return false if the controller didn't successfully start.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + }, + infs: &MockTrackingInformers{}, + }, + argsStart: argsStart{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(_ context.Context) error { + return errors.New("boom") + }, + }, nil + }), + }, + }, + args: args{ + name: "cool-controller", + }, + want: want{ + running: false, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + e := New(tc.params.mgr, tc.params.infs, tc.params.c, tc.params.opts...) + _ = e.Start(tc.args.name, tc.argsStart.opts...) + + // Give the start goroutine a little time to fail. + time.Sleep(1 * time.Second) + + running := e.IsRunning(tc.args.name) + if diff := cmp.Diff(tc.want.running, running); diff != "" { + t.Errorf("\n%s\ne.IsRunning(...): -want, +got:\n%s", tc.reason, diff) + } + + // Stop the controller. Will be a no-op if it never started. + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + _ = e.Stop(ctx, tc.args.name) + + // IsRunning should always be false after the controller is stopped. + running = e.IsRunning(tc.args.name) + if diff := cmp.Diff(false, running); diff != "" { + t.Errorf("\n%s\ne.IsRunning(...): -want, +got:\n%s", tc.reason, diff) + } + }) + } +} + +func TestStopController(t *testing.T) { + type params struct { + mgr manager.Manager + infs TrackingInformers + c client.Client + opts []ControllerEngineOption + } + type args struct { + ctx context.Context + name string + } + type want struct { + err error + } + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "SuccessfulStop": { + reason: "It should be possible to successfully stop a controller.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: &MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return nil + }, + }, + }, + args: args{ + ctx: context.Background(), + name: "cool-controller", + }, + want: want{ + err: nil, + }, + }, + // TODO(negz): Test handling watches that fail to stop? 
I'm not sure + // it's worth the amount of complexity making StoppableSource injectable + // would add. We could make Watch an interface with a GetSource. + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + e := New(tc.params.mgr, tc.params.infs, tc.params.c, tc.params.opts...) + err := e.Start(tc.args.name, WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + MockWatch: func(_ source.Source) error { + return nil + }, + }, nil + })) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Fatalf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Add a watch for stop to stop. + u := &unstructured.Unstructured{} + u.SetAPIVersion("test.crossplane.io/v1") + u.SetKind("Composed") + err = e.StartWatches(tc.args.name, WatchFor(u, WatchTypeComposedResource, nil)) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Stop the controller. Will be a no-op if it never started. + err = e.Stop(tc.args.ctx, tc.args.name) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Stop(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Stop should be a no-op when called on a stopped controller. + err = e.Stop(tc.args.ctx, tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Stop(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} + +func TestStartWatches(t *testing.T) { + type params struct { + mgr manager.Manager + infs TrackingInformers + c client.Client + opts []ControllerEngineOption + } + // We need to control how we start the controller. 
+ type argsStart struct { + name string + opts []ControllerOption + } + type args struct { + name string + ws []Watch + } + type want struct { + err error + watches []WatchID + } + cases := map[string]struct { + reason string + params params + argsStart argsStart + args args + want want + }{ + "StartWatchError": { + reason: "StartWatches should return an error when a watch fails to start.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: &MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return []schema.GroupVersionKind{ + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "Composed", + }, + } + }, + }, + }, + argsStart: argsStart{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + MockWatch: func(_ source.Source) error { + return errors.New("boom") + }, + }, nil + }), + }, + }, + args: args{ + name: "cool-controller", + ws: []Watch{ + func() Watch { + u := &unstructured.Unstructured{} + u.SetAPIVersion("test.crossplane.io/v1") + u.SetKind("Composed") + return WatchFor(u, WatchTypeComposedResource, nil) + }(), + }, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "SuccessfulStartWatches": { + reason: "StartWatches shouldn't return an error when all watches start successfully.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: &MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return []schema.GroupVersionKind{ + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + } + }, + }, + }, + argsStart: argsStart{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + MockWatch: func(_ source.Source) error { + return nil + }, + }, nil + }), + }, + }, + args: args{ + name: "cool-controller", + ws: []Watch{ + func() Watch { + u := &unstructured.Unstructured{} + u.SetAPIVersion("test.crossplane.io/v1") + u.SetKind("Resource") + return WatchFor(u, WatchTypeComposedResource, nil) + }(), + // This should be deduplicated into the above watch. + func() Watch { + u := &unstructured.Unstructured{} + u.SetAPIVersion("test.crossplane.io/v1") + u.SetKind("Resource") + return WatchFor(u, WatchTypeComposedResource, nil) + }(), + // This shouldn't be deduplicated, because it's a different + // watch type. 
+ func() Watch { + u := &unstructured.Unstructured{} + u.SetAPIVersion("test.crossplane.io/v1") + u.SetKind("Resource") + return WatchFor(u, WatchTypeCompositeResource, nil) + }(), + }, + }, + want: want{ + err: nil, + watches: []WatchID{ + { + Type: WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + e := New(tc.params.mgr, tc.params.infs, tc.params.c, tc.params.opts...) + err := e.Start(tc.argsStart.name, tc.argsStart.opts...) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Fatalf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + err = e.StartWatches(tc.args.name, tc.args.ws...) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Start the same watches again to exercise the code that ensures we + // only add each watch once. + err = e.StartWatches(tc.args.name, tc.args.ws...) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + watches, err := e.GetWatches(tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.GetWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.watches, watches, + cmpopts.EquateEmpty(), + cmpopts.SortSlices(func(a, b WatchID) bool { return fmt.Sprintf("%s", a) > fmt.Sprintf("%s", b) }), + ); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Stop the controller. Will be a no-op if it never started. 
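+			// A timeout bounds the stop so a controller that fails to stop
+			// can't hang the test.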
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + err = e.Stop(ctx, tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Stop(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} + +func TestStopWatches(t *testing.T) { + type params struct { + mgr manager.Manager + infs TrackingInformers + c client.Client + opts []ControllerEngineOption + } + type args struct { + ctx context.Context + name string + ws []WatchID + } + type want struct { + stopped int + err error + watches []WatchID + } + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "NoWatchesToStop": { + reason: "StopWatches should be a no-op when there's no watches to stop.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: &MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return nil + }, + }, + }, + args: args{ + ctx: context.Background(), + name: "cool-controller", + ws: []WatchID{ + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "NeverStarted", + }, + }, + }, + }, + want: want{ + stopped: 0, + err: nil, + watches: []WatchID{ + { + Type: WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + }, + }, + }, + "StopOneWatch": { + reason: "StopWatches should only stop the watches it's asked to.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: &MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return nil + }, + }, + }, + args: args{ + ctx: context.Background(), + name: "cool-controller", + ws: []WatchID{ + { + Type: WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + }, + }, + want: want{ + stopped: 1, + err: nil, + watches: []WatchID{ + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + }, + }, + }, + "StopAllWatches": { + reason: "StopWatches should stop all watches when asked to.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: &MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return nil + }, + }, + }, + args: args{ + ctx: context.Background(), + name: "cool-controller", + ws: []WatchID{ + { + Type: WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "NeverStarted", + }, + }, + }, + }, + want: want{ + stopped: 2, + err: nil, + watches: 
[]WatchID{}, + }, + }, + // TODO(negz): Test handling watches that fail to stop? I'm not sure + // it's worth the amount of complexity making StoppableSource injectable + // would add. We could make Watch an interface with a GetSource. + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + e := New(tc.params.mgr, tc.params.infs, tc.params.c, tc.params.opts...) + err := e.Start(tc.args.name, WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + MockWatch: func(_ source.Source) error { + return nil + }, + }, nil + })) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Fatalf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Add some watches to stop. + u1 := &unstructured.Unstructured{} + u1.SetAPIVersion("test.crossplane.io/v1") + u1.SetKind("Resource") + err = e.StartWatches(tc.args.name, + WatchFor(u1, WatchTypeComposedResource, nil), + WatchFor(u1, WatchTypeCompositeResource, nil), + ) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + stopped, err := e.StopWatches(tc.args.ctx, tc.args.name, tc.args.ws...) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.StopWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.stopped, stopped); diff != "" { + t.Errorf("\n%s\ne.StopWatches(...): -want stopped, +got stopped:\n%s", tc.reason, diff) + } + + watches, err := e.GetWatches(tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.GetWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.watches, watches, + cmpopts.EquateEmpty(), + cmpopts.SortSlices(func(a, b WatchID) bool { return fmt.Sprintf("%s", a) > fmt.Sprintf("%s", b) }), + ); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Stop the controller. Will be a no-op if it never started. + err = e.Stop(tc.args.ctx, tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Stop(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/internal/engine/source.go b/internal/engine/source.go new file mode 100644 index 000000000..e04aa3ca3 --- /dev/null +++ b/internal/engine/source.go @@ -0,0 +1,195 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + + kcache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/crossplane/crossplane-runtime/pkg/errors" +) + +var _ source.Source = &StoppableSource{} + +// NewStoppableSource returns a new watch source that can be stopped. +func NewStoppableSource(infs cache.Informers, t client.Object, h handler.EventHandler, ps ...predicate.Predicate) *StoppableSource { + return &StoppableSource{infs: infs, Type: t, handler: h, predicates: ps} +} + +// A StoppableSource is a controller-runtime watch source that can be stopped. +type StoppableSource struct { + infs cache.Informers + + Type client.Object + handler handler.EventHandler + predicates []predicate.Predicate + + reg kcache.ResourceEventHandlerRegistration +} + +// Start is internal and should be called only by the Controller to register +// an EventHandler with the Informer to enqueue reconcile.Requests. +func (s *StoppableSource) Start(ctx context.Context, q workqueue.RateLimitingInterface) error { + i, err := s.infs.GetInformer(ctx, s.Type, cache.BlockUntilSynced(true)) + if err != nil { + return errors.Wrapf(err, "cannot get informer for %T", s.Type) + } + + reg, err := i.AddEventHandler(NewEventHandler(ctx, q, s.handler, s.predicates...).HandlerFuncs()) + if err != nil { + return errors.Wrapf(err, "cannot add event handler") + } + s.reg = reg + + return nil +} + +// Stop removes the EventHandler from the source's Informer. The Informer will +// stop sending events to the source. +func (s *StoppableSource) Stop(ctx context.Context) error { + if s.reg == nil { + return nil + } + + i, err := s.infs.GetInformer(ctx, s.Type) + if err != nil { + return errors.Wrapf(err, "cannot get informer for %T", s.Type) + } + + if err := i.RemoveEventHandler(s.reg); err != nil { + return errors.Wrap(err, "cannot remove event handler") + } + + s.reg = nil + return nil +} + +// NewEventHandler creates a new EventHandler. +func NewEventHandler(ctx context.Context, q workqueue.RateLimitingInterface, h handler.EventHandler, ps ...predicate.Predicate) *EventHandler { + return &EventHandler{ + ctx: ctx, + handler: h, + queue: q, + predicates: ps, + } +} + +// An EventHandler converts a controller-runtime handler and predicates into a +// client-go ResourceEventHandler. It's a stripped down version of +// controller-runtime's internal implementation. +// https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.2/pkg/internal/source/event_handler.go#L35 +type EventHandler struct { + ctx context.Context //nolint:containedctx // Kept for compatibility with controller-runtime. + + handler handler.EventHandler + queue workqueue.RateLimitingInterface + predicates []predicate.Predicate +} + +// HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs. +func (e *EventHandler) HandlerFuncs() kcache.ResourceEventHandlerFuncs { + return kcache.ResourceEventHandlerFuncs{ + AddFunc: e.OnAdd, + UpdateFunc: e.OnUpdate, + DeleteFunc: e.OnDelete, + } +} + +// OnAdd creates CreateEvent and calls Create on EventHandler. 
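+// The event is dropped if the object isn't a client.Object or if any of the
+// predicates reject it.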
+func (e *EventHandler) OnAdd(obj interface{}) { + o, ok := obj.(client.Object) + if !ok { + return + } + + c := event.CreateEvent{Object: o} + for _, p := range e.predicates { + if !p.Create(c) { + return + } + } + + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Create(ctx, c, e.queue) +} + +// OnUpdate creates UpdateEvent and calls Update on EventHandler. +func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) { + o, ok := oldObj.(client.Object) + if !ok { + return + } + + n, ok := newObj.(client.Object) + if !ok { + return + } + + u := event.UpdateEvent{ObjectOld: o, ObjectNew: n} + + for _, p := range e.predicates { + if !p.Update(u) { + return + } + } + + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Update(ctx, u, e.queue) +} + +// OnDelete creates DeleteEvent and calls Delete on EventHandler. +func (e *EventHandler) OnDelete(obj interface{}) { + var d event.DeleteEvent + + switch o := obj.(type) { + case client.Object: + d = event.DeleteEvent{Object: o} + + // Deal with tombstone events by pulling the object out. Tombstone events + // wrap the object in a DeleteFinalStateUnknown struct, so the object needs + // to be pulled out. + case kcache.DeletedFinalStateUnknown: + wrapped, ok := o.Obj.(client.Object) + if !ok { + return + } + d = event.DeleteEvent{DeleteStateUnknown: true, Object: wrapped} + + default: + return + } + + for _, p := range e.predicates { + if !p.Delete(d) { + return + } + } + + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Delete(ctx, d, e.queue) +} diff --git a/internal/engine/source_test.go b/internal/engine/source_test.go new file mode 100644 index 000000000..e855c4ad9 --- /dev/null +++ b/internal/engine/source_test.go @@ -0,0 +1,272 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kcache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var _ cache.Informer = &MockInformer{} + +type MockInformer struct { + cache.Informer + + MockAddEventHandler func(handler kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) + MockRemoveEventHandler func(handle kcache.ResourceEventHandlerRegistration) error +} + +func (m *MockInformer) AddEventHandler(handler kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return m.MockAddEventHandler(handler) +} + +func (m *MockInformer) RemoveEventHandler(handle kcache.ResourceEventHandlerRegistration) error { + return m.MockRemoveEventHandler(handle) +} + +func TestStartSource(t *testing.T) { + type params struct { + infs cache.Informers + t client.Object + h handler.EventHandler + ps []predicate.Predicate + } + type args struct { + ctx context.Context + q workqueue.RateLimitingInterface + } + type want struct { + err error + } + + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "GetInformerError": { + reason: "Start should return an error if it can't get an informer for the supplied type.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return nil, errors.New("boom") + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "AddEventHandlerError": { + reason: "Start should return an error if it can't add an event handler to the informer.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return &MockInformer{ + MockAddEventHandler: func(_ kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return nil, errors.New("boom") + }, + }, nil + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "SuccessfulStart": { + reason: "Start should return nil if it successfully starts the source.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return &MockInformer{ + MockAddEventHandler: func(_ kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return nil, nil + }, + }, nil + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. + }, + want: want{ + err: nil, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + s := NewStoppableSource(tc.params.infs, tc.params.t, tc.params.h, tc.params.ps...) 
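+			// Construction is identical for every case; only the mocked
+			// informer cache differs.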
+ + err := s.Start(tc.args.ctx, tc.args.q) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ns.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} + +var _ kcache.ResourceEventHandlerRegistration = &MockRegistration{} + +type MockRegistration struct{} + +func (m *MockRegistration) HasSynced() bool { return true } + +func TestStopSource(t *testing.T) { + type params struct { + infs cache.Informers + t client.Object + h handler.EventHandler + ps []predicate.Predicate + } + type args struct { + ctx context.Context + q workqueue.RateLimitingInterface + } + type want struct { + err error + } + + // Used to return an error only when getting an informer to stop it. + started := false + + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "SuccessfulStop": { + reason: "Stop should return nil if it successfully stops the source.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return &MockInformer{ + MockAddEventHandler: func(_ kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return &MockRegistration{}, nil + }, + MockRemoveEventHandler: func(_ kcache.ResourceEventHandlerRegistration) error { + return nil + }, + }, nil + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. + }, + want: want{ + err: nil, + }, + }, + "GetInformerError": { + reason: "Stop should return an error if it can't get an informer.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + if !started { + started = true + return &MockInformer{ + MockAddEventHandler: func(_ kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return &MockRegistration{}, nil + }, + }, nil + } + return nil, errors.New("boom") + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "RemoveEventHandlerError": { + reason: "Stop should return an error if it can't remove the source's event handler.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return &MockInformer{ + MockAddEventHandler: func(_ kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return &MockRegistration{}, nil + }, + MockRemoveEventHandler: func(_ kcache.ResourceEventHandlerRegistration) error { + return errors.New("boom") + }, + }, nil + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + s := NewStoppableSource(tc.params.infs, tc.params.t, tc.params.h, tc.params.ps...) 
+ + err := s.Start(tc.args.ctx, tc.args.q) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ns.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + err = s.Stop(tc.args.ctx) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ns.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} From 3c1a9102548cff837cfe47750de8a347a8eee7ae Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 08:09:05 +0000 Subject: [PATCH 211/370] chore(deps): update actions/checkout digest to a5ac7e5 --- .github/workflows/backport.yml | 2 +- .github/workflows/ci.yml | 16 ++++++++-------- .github/workflows/commands.yml | 2 +- .github/workflows/promote.yml | 2 +- .github/workflows/scan.yaml | 2 +- .github/workflows/tag.yml | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 4ffdd8f46..92ce498e4 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -22,7 +22,7 @@ jobs: if: github.event.pull_request.merged steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: fetch-depth: 0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 390b8bf60..1b5a51f7a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,7 +27,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: submodules: true @@ -81,7 +81,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: submodules: true @@ -127,7 +127,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: submodules: true @@ -171,7 +171,7 @@ jobs: if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: submodules: true @@ -192,7 +192,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: submodules: true @@ -261,7 +261,7 @@ jobs: install: true - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: submodules: true @@ -352,7 +352,7 @@ jobs: install: true - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: submodules: true @@ -472,7 +472,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - name: Setup Buf uses: bufbuild/buf-setup-action@v1 diff --git a/.github/workflows/commands.yml b/.github/workflows/commands.yml index 9cb34aa1f..e81e6b509 100644 --- a/.github/workflows/commands.yml +++ b/.github/workflows/commands.yml @@ 
-21,7 +21,7 @@ jobs: permission-level: write - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: fetch-depth: 0 diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index b2018c533..ee28e24fb 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -36,7 +36,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: submodules: true diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index ef5707ae6..a376ccba7 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -17,7 +17,7 @@ jobs: supported_releases: ${{ steps.get-releases.outputs.supported_releases }} steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: fetch-depth: 0 diff --git a/.github/workflows/tag.yml b/.github/workflows/tag.yml index cb730a0bb..22994508d 100644 --- a/.github/workflows/tag.yml +++ b/.github/workflows/tag.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - name: Create Tag uses: negz/create-tag@39bae1e0932567a58c20dea5a1a0d18358503320 # v1 From 7d8dcdf2533bfba1d20e532cd01494a9a9e9be6b Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 08:09:08 +0000 Subject: [PATCH 212/370] chore(deps): update codecov/codecov-action digest to 125fc84 --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 390b8bf60..962445880 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -229,7 +229,7 @@ jobs: run: make -j2 test - name: Publish Unit Test Coverage - uses: codecov/codecov-action@6d798873df2b1b8e5846dba6fb86631229fbcb17 # v4 + uses: codecov/codecov-action@125fc84a9a348dbcf27191600683ec096ec9021c # v4 with: flags: unittests file: _output/tests/linux_amd64/coverage.txt From 0c09a86748d907537842902c0fa757e3ece1dae9 Mon Sep 17 00:00:00 2001 From: Neeraj Nagure Date: Thu, 16 May 2024 11:13:26 +0530 Subject: [PATCH 213/370] added template name to error Signed-off-by: Neeraj Nagure --- .../controller/apiextensions/composite/composition_pt.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index 023afa2fa..069113fed 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -45,7 +45,7 @@ import ( const ( errGetComposed = "cannot get composed resource" errGCComposed = "cannot garbage collect composed resource" - errApplyComposed = "cannot apply composed resource" + errApplyComposed = "cannot apply composed resource %q" errFetchDetails = "cannot fetch connection details" errInline = "cannot inline Composition patch sets" @@ -279,7 +279,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re // run again the composition after some other resource is // created or updated successfully. 
So, we emit a warning event // and move on. - events = append(events, event.Warning(reasonCompose, errors.Wrap(err, errApplyComposed))) + events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errApplyComposed, ptr.Deref(t.Name, fmt.Sprintf("%d", i+1))))) // We unset the cd here so that we don't try to observe it // later. This will also mean we report it as not ready and not // synced. Resulting in the XR being reported as not ready nor @@ -291,7 +291,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re // TODO(negz): Include the template name (if any) in this error. // Including the rendered resource's kind may help too (e.g. if the // template is anonymous). - return CompositionResult{}, errors.Wrap(err, errApplyComposed) + return CompositionResult{}, errors.Wrapf(err, errApplyComposed, ptr.Deref(t.Name, fmt.Sprintf("%d", i+1))) } } From 1a110846a6a49a0ea95e076a9d405fc469d615e4 Mon Sep 17 00:00:00 2001 From: Neeraj Nagure Date: Sat, 18 May 2024 12:38:59 +0530 Subject: [PATCH 214/370] renamed errApplyComposed and fixed unit tests Signed-off-by: Neeraj Nagure --- .../fn/proto/v1beta1/run_function.pb.go | 47 +++++++++---------- .../apiextensions/composite/composition_pt.go | 14 +++--- .../composite/composition_pt_test.go | 2 +- 3 files changed, 31 insertions(+), 32 deletions(-) diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go index ef5f1a8de..595d30a22 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.25.0-devel // protoc (unknown) // source: apiextensions/fn/proto/v1beta1/run_function.proto @@ -283,7 +283,6 @@ type Credentials struct { // Source of the credentials. // // Types that are assignable to Source: - // // *Credentials_CredentialData Source isCredentials_Source `protobuf_oneof:"source"` } @@ -454,6 +453,7 @@ type RunFunctionResponse struct { // concerned with. A Function must pass through any part of the desired state // that it is not concerned with. // + // // Note that the desired state must be a partial object with only the fields // that this function (and its predecessors in the pipeline) wants to have // set in the object. Copying a non-partial observed state to desired is most @@ -650,7 +650,6 @@ type ResourceSelector struct { // Resources to match. // // Types that are assignable to Match: - // // *ResourceSelector_MatchName // *ResourceSelector_MatchLabels Match isResourceSelector_Match `protobuf_oneof:"match"` @@ -916,41 +915,41 @@ type Resource struct { // The JSON representation of the resource. // - // - Crossplane will set this field in a RunFunctionRequest to the entire - // observed state of a resource - including its metadata, spec, and status. + // * Crossplane will set this field in a RunFunctionRequest to the entire + // observed state of a resource - including its metadata, spec, and status. // - // - A Function should set this field in a RunFunctionRequest to communicate - // the desired state of a composite or composed resource. + // * A Function should set this field in a RunFunctionRequest to communicate + // the desired state of a composite or composed resource. // - // - A Function may only specify the desired status of a composite resource - - // not its metadata or spec. 
A Function should not return desired metadata - // or spec for a composite resource. This will be ignored. + // * A Function may only specify the desired status of a composite resource - + // not its metadata or spec. A Function should not return desired metadata + // or spec for a composite resource. This will be ignored. // - // - A Function may not specify the desired status of a composed resource - - // only its metadata and spec. A Function should not return desired status - // for a composed resource. This will be ignored. + // * A Function may not specify the desired status of a composed resource - + // only its metadata and spec. A Function should not return desired status + // for a composed resource. This will be ignored. Resource *structpb.Struct `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` // The resource's connection details. // - // - Crossplane will set this field in a RunFunctionRequest to communicate the - // the observed connection details of a composite or composed resource. + // * Crossplane will set this field in a RunFunctionRequest to communicate the + // the observed connection details of a composite or composed resource. // - // - A Function should set this field in a RunFunctionResponse to indicate the - // desired connection details of the composite resource. + // * A Function should set this field in a RunFunctionResponse to indicate the + // desired connection details of the composite resource. // - // - A Function should not set this field in a RunFunctionResponse to indicate - // the desired connection details of a composed resource. This will be - // ignored. + // * A Function should not set this field in a RunFunctionResponse to indicate + // the desired connection details of a composed resource. This will be + // ignored. ConnectionDetails map[string][]byte `protobuf:"bytes,2,rep,name=connection_details,json=connectionDetails,proto3" json:"connection_details,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Ready indicates whether the resource should be considered ready. // // * Crossplane will never set this field in a RunFunctionRequest. // - // - A Function should set this field to READY_TRUE in a RunFunctionResponse - // to indicate that a desired composed resource is ready. + // * A Function should set this field to READY_TRUE in a RunFunctionResponse + // to indicate that a desired composed resource is ready. // - // - A Function should not set this field in a RunFunctionResponse to indicate - // that the desired composite resource is ready. This will be ignored. + // * A Function should not set this field in a RunFunctionResponse to indicate + // that the desired composite resource is ready. This will be ignored. Ready Ready `protobuf:"varint,3,opt,name=ready,proto3,enum=apiextensions.fn.proto.v1beta1.Ready" json:"ready,omitempty"` } diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index 069113fed..b22d74a47 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -43,12 +43,12 @@ import ( // Error strings. 
const ( - errGetComposed = "cannot get composed resource" - errGCComposed = "cannot garbage collect composed resource" - errApplyComposed = "cannot apply composed resource %q" - errFetchDetails = "cannot fetch connection details" - errInline = "cannot inline Composition patch sets" + errGetComposed = "cannot get composed resource" + errGCComposed = "cannot garbage collect composed resource" + errFetchDetails = "cannot fetch connection details" + errInline = "cannot inline Composition patch sets" + errFmtApplyComposed = "cannot apply composed resource %q" errFmtPatchEnvironment = "cannot apply environment patch at index %d" errFmtParseBase = "cannot parse base template of composed resource %q" errFmtRenderFromCompositePatches = "cannot render FromComposite or environment patches for composed resource %q" @@ -279,7 +279,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re // run again the composition after some other resource is // created or updated successfully. So, we emit a warning event // and move on. - events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errApplyComposed, ptr.Deref(t.Name, fmt.Sprintf("%d", i+1))))) + events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errFmtApplyComposed, ptr.Deref(t.Name, fmt.Sprintf("%d", i+1))))) // We unset the cd here so that we don't try to observe it // later. This will also mean we report it as not ready and not // synced. Resulting in the XR being reported as not ready nor @@ -291,7 +291,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re // TODO(negz): Include the template name (if any) in this error. // Including the rendered resource's kind may help too (e.g. if the // template is anonymous). - return CompositionResult{}, errors.Wrapf(err, errApplyComposed, ptr.Deref(t.Name, fmt.Sprintf("%d", i+1))) + return CompositionResult{}, errors.Wrapf(err, errFmtApplyComposed, ptr.Deref(t.Name, fmt.Sprintf("%d", i+1))) } } diff --git a/internal/controller/apiextensions/composite/composition_pt_test.go b/internal/controller/apiextensions/composite/composition_pt_test.go index 52558ab59..efb8e3059 100644 --- a/internal/controller/apiextensions/composite/composition_pt_test.go +++ b/internal/controller/apiextensions/composite/composition_pt_test.go @@ -203,7 +203,7 @@ func TestPTCompose(t *testing.T) { }, }, want: want{ - err: errors.Wrap(errors.Wrap(errBoom, "cannot create object"), errApplyComposed), + err: errors.Wrapf(errors.Wrap(errBoom, "cannot create object"), errFmtApplyComposed, "cool-resource"), }, }, "FetchConnectionDetailsError": { From d88600fe30402bc6aa4e81e86f6fb1c7283dc4e0 Mon Sep 17 00:00:00 2001 From: Chuan-Yen Chiang Date: Tue, 21 May 2024 18:51:58 +0800 Subject: [PATCH 215/370] resolves crossplane/crossplane#5717 Signed-off-by: Chuan-Yen Chiang Signed-off-by: Chuan-Yen Chiang --- cmd/crank/beta/xpkg/init.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/crank/beta/xpkg/init.go b/cmd/crank/beta/xpkg/init.go index b8f6d57c6..424782ee0 100644 --- a/cmd/crank/beta/xpkg/init.go +++ b/cmd/crank/beta/xpkg/init.go @@ -235,6 +235,7 @@ func printFile(w io.Writer, path string) error { if _, err := fmt.Fprintf(w, "\n%s\n", content); err != nil { return errors.Wrap(err, "failed to write to stdout") } + defer func() { _ = f.Close() }() return nil } From 9644d74faa585729e5b2b65c65246c3abfcad8cf Mon Sep 17 00:00:00 2001 From: Chuan-Yen Chiang Date: Tue, 21 May 2024 18:59:05 +0800 Subject: [PATCH 216/370] follow coding style, use lint 
ignore if the err output is safe to ignore. Signed-off-by: Chuan-Yen Chiang --- cmd/crank/beta/xpkg/init.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crank/beta/xpkg/init.go b/cmd/crank/beta/xpkg/init.go index 424782ee0..f45cc0c2a 100644 --- a/cmd/crank/beta/xpkg/init.go +++ b/cmd/crank/beta/xpkg/init.go @@ -235,7 +235,7 @@ func printFile(w io.Writer, path string) error { if _, err := fmt.Fprintf(w, "\n%s\n", content); err != nil { return errors.Wrap(err, "failed to write to stdout") } - defer func() { _ = f.Close() }() + defer f.Close() //nolint:errcheck // It's safe to ingore the error here. return nil } From de5b33a4762adc81289b87143ef61cbaef2521c8 Mon Sep 17 00:00:00 2001 From: Chuan-Yen Chiang Date: Tue, 21 May 2024 22:44:23 +0800 Subject: [PATCH 217/370] Update the code based on the feedback Signed-off-by: Chuan-Yen Chiang --- cmd/crank/beta/xpkg/init.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crank/beta/xpkg/init.go b/cmd/crank/beta/xpkg/init.go index f45cc0c2a..c5be0f171 100644 --- a/cmd/crank/beta/xpkg/init.go +++ b/cmd/crank/beta/xpkg/init.go @@ -228,6 +228,7 @@ func printFile(w io.Writer, path string) error { if err != nil { return errors.Wrapf(err, "failed to open file %s", path) } + defer f.Close() //nolint:errcheck // It's safe to ingore the error here. content, err := io.ReadAll(f) if err != nil { return errors.Wrapf(err, "failed to read file %s", path) @@ -235,7 +236,6 @@ func printFile(w io.Writer, path string) error { if _, err := fmt.Fprintf(w, "\n%s\n", content); err != nil { return errors.Wrap(err, "failed to write to stdout") } - defer f.Close() //nolint:errcheck // It's safe to ingore the error here. return nil } From 982aa2d6b486d51472e077b0594493ae79ba7483 Mon Sep 17 00:00:00 2001 From: Chuan-Yen Chiang Date: Wed, 22 May 2024 10:06:06 +0800 Subject: [PATCH 218/370] fix typo Signed-off-by: Chuan-Yen Chiang --- cmd/crank/beta/xpkg/init.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crank/beta/xpkg/init.go b/cmd/crank/beta/xpkg/init.go index c5be0f171..f82b7cb49 100644 --- a/cmd/crank/beta/xpkg/init.go +++ b/cmd/crank/beta/xpkg/init.go @@ -228,7 +228,7 @@ func printFile(w io.Writer, path string) error { if err != nil { return errors.Wrapf(err, "failed to open file %s", path) } - defer f.Close() //nolint:errcheck // It's safe to ingore the error here. + defer f.Close() //nolint:errcheck // It's safe to ignore the error because it only do read operation. content, err := io.ReadAll(f) if err != nil { return errors.Wrapf(err, "failed to read file %s", path) From 7be5df64f991ccc37db4c4d822e25daf0d24f67f Mon Sep 17 00:00:00 2001 From: Chuan-Yen Chiang Date: Wed, 22 May 2024 11:17:29 +0800 Subject: [PATCH 219/370] Unset some UP env vars to avoid unexpect test behaviour. 
Signed-off-by: Chuan-Yen Chiang --- internal/xpkg/upbound/context_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/xpkg/upbound/context_test.go b/internal/xpkg/upbound/context_test.go index 7d4b73d07..d46b39f98 100644 --- a/internal/xpkg/upbound/context_test.go +++ b/internal/xpkg/upbound/context_test.go @@ -19,6 +19,7 @@ package upbound import ( "fmt" "net/url" + "os" "path/filepath" "testing" @@ -274,6 +275,11 @@ func TestNewFromFlags(t *testing.T) { } for name, tc := range cases { + // Unset common UP env vars used by the test to avoid unexpect behaviours describe in #5721 + os.Unsetenv("UP_ACCOUNT") + os.Unsetenv("UP_DOMAIN") + os.Unsetenv("UP_PROFILE") + os.Unsetenv("UP_INSECURE_SKIP_TLS_VERIFY") t.Run(name, func(t *testing.T) { flags := Flags{} parser, _ := kong.New(&flags) From f5bebfd0b3cbc47f21c3cd690bf992aa153bc3f0 Mon Sep 17 00:00:00 2001 From: Neeraj Nagure Date: Wed, 22 May 2024 10:15:02 +0530 Subject: [PATCH 220/370] fixed protoc-gen-go version Signed-off-by: Neeraj Nagure --- apis/apiextensions/fn/proto/v1beta1/run_function.pb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go index 595d30a22..4fce94bb5 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.33.0 // protoc (unknown) // source: apiextensions/fn/proto/v1beta1/run_function.proto From 8e08fbfa750ead710565a89daef46e7f7e2c85bb Mon Sep 17 00:00:00 2001 From: Neeraj Nagure Date: Wed, 22 May 2024 10:23:06 +0530 Subject: [PATCH 221/370] fixed run_function.pb.go Signed-off-by: Neeraj Nagure --- .../fn/proto/v1beta1/run_function.pb.go | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go index 4fce94bb5..ef5f1a8de 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go @@ -283,6 +283,7 @@ type Credentials struct { // Source of the credentials. // // Types that are assignable to Source: + // // *Credentials_CredentialData Source isCredentials_Source `protobuf_oneof:"source"` } @@ -453,7 +454,6 @@ type RunFunctionResponse struct { // concerned with. A Function must pass through any part of the desired state // that it is not concerned with. // - // // Note that the desired state must be a partial object with only the fields // that this function (and its predecessors in the pipeline) wants to have // set in the object. Copying a non-partial observed state to desired is most @@ -650,6 +650,7 @@ type ResourceSelector struct { // Resources to match. // // Types that are assignable to Match: + // // *ResourceSelector_MatchName // *ResourceSelector_MatchLabels Match isResourceSelector_Match `protobuf_oneof:"match"` @@ -915,41 +916,41 @@ type Resource struct { // The JSON representation of the resource. // - // * Crossplane will set this field in a RunFunctionRequest to the entire - // observed state of a resource - including its metadata, spec, and status. + // - Crossplane will set this field in a RunFunctionRequest to the entire + // observed state of a resource - including its metadata, spec, and status. 
// - // * A Function should set this field in a RunFunctionRequest to communicate - // the desired state of a composite or composed resource. + // - A Function should set this field in a RunFunctionRequest to communicate + // the desired state of a composite or composed resource. // - // * A Function may only specify the desired status of a composite resource - - // not its metadata or spec. A Function should not return desired metadata - // or spec for a composite resource. This will be ignored. + // - A Function may only specify the desired status of a composite resource - + // not its metadata or spec. A Function should not return desired metadata + // or spec for a composite resource. This will be ignored. // - // * A Function may not specify the desired status of a composed resource - - // only its metadata and spec. A Function should not return desired status - // for a composed resource. This will be ignored. + // - A Function may not specify the desired status of a composed resource - + // only its metadata and spec. A Function should not return desired status + // for a composed resource. This will be ignored. Resource *structpb.Struct `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` // The resource's connection details. // - // * Crossplane will set this field in a RunFunctionRequest to communicate the - // the observed connection details of a composite or composed resource. + // - Crossplane will set this field in a RunFunctionRequest to communicate the + // the observed connection details of a composite or composed resource. // - // * A Function should set this field in a RunFunctionResponse to indicate the - // desired connection details of the composite resource. + // - A Function should set this field in a RunFunctionResponse to indicate the + // desired connection details of the composite resource. // - // * A Function should not set this field in a RunFunctionResponse to indicate - // the desired connection details of a composed resource. This will be - // ignored. + // - A Function should not set this field in a RunFunctionResponse to indicate + // the desired connection details of a composed resource. This will be + // ignored. ConnectionDetails map[string][]byte `protobuf:"bytes,2,rep,name=connection_details,json=connectionDetails,proto3" json:"connection_details,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Ready indicates whether the resource should be considered ready. // // * Crossplane will never set this field in a RunFunctionRequest. // - // * A Function should set this field to READY_TRUE in a RunFunctionResponse - // to indicate that a desired composed resource is ready. + // - A Function should set this field to READY_TRUE in a RunFunctionResponse + // to indicate that a desired composed resource is ready. // - // * A Function should not set this field in a RunFunctionResponse to indicate - // that the desired composite resource is ready. This will be ignored. + // - A Function should not set this field in a RunFunctionResponse to indicate + // that the desired composite resource is ready. This will be ignored. 
Ready Ready `protobuf:"varint,3,opt,name=ready,proto3,enum=apiextensions.fn.proto.v1beta1.Ready" json:"ready,omitempty"` } From a6abe33eacef733a2bdcbeb11e67e1f918341912 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 08:09:51 +0000 Subject: [PATCH 222/370] chore(deps): update github/codeql-action digest to 9fdb3e4 --- .github/workflows/ci.yml | 4 ++-- .github/workflows/scan.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 72741f977..8506f323c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -158,12 +158,12 @@ jobs: run: make modules.download modules.check - name: Initialize CodeQL - uses: github/codeql-action/init@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3 + uses: github/codeql-action/init@9fdb3e49720b44c48891d036bb502feb25684276 # v3 with: languages: go - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3 + uses: github/codeql-action/analyze@9fdb3e49720b44c48891d036bb502feb25684276 # v3 trivy-scan-fs: runs-on: ubuntu-22.04 diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index a376ccba7..13f7e3b3f 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -124,7 +124,7 @@ jobs: retention-days: 3 - name: Upload Trivy Scan Results To GitHub Security Tab - uses: github/codeql-action/upload-sarif@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3 + uses: github/codeql-action/upload-sarif@9fdb3e49720b44c48891d036bb502feb25684276 # v3 with: sarif_file: 'trivy-results.sarif' category: ${{ matrix.image }}:${{ env.tag }} From 3a8616775e457733d2eec364c46162c7a7ce5bf2 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Fri, 24 May 2024 08:11:10 +0000 Subject: [PATCH 223/370] fix(deps): update module github.com/docker/docker-credential-helpers to v0.8.2 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 002770eb5..7f1bbb84f 100644 --- a/go.mod +++ b/go.mod @@ -132,7 +132,7 @@ require ( github.com/dimchansky/utfbom v1.1.1 // indirect github.com/docker/cli v24.0.7+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.1 + github.com/docker/docker-credential-helpers v0.8.2 github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect diff --git a/go.sum b/go.sum index 3ed062774..3589a5c3e 100644 --- a/go.sum +++ b/go.sum @@ -153,8 +153,8 @@ github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= -github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= -github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod 
h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= From 808e30bef1345ef110c7ee5908118156722d2417 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Mon, 20 May 2024 08:13:09 +0000 Subject: [PATCH 224/370] chore(deps): update dependency golangci/golangci-lint to v1.58.2 --- .github/workflows/ci.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8506f323c..81cf010b1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,7 +11,7 @@ on: env: # Common versions GO_VERSION: '1.22.3' - GOLANGCI_VERSION: 'v1.57.2' + GOLANGCI_VERSION: 'v1.58.2' DOCKER_BUILDX_VERSION: 'v0.10.0' # Common users. We can't run a step 'if secrets.AWS_USR != ""' but we can run diff --git a/Makefile b/Makefile index b9595a1e7..90e5aa6cd 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ GO_TEST_PACKAGES = $(GO_PROJECT)/test/e2e GO_LDFLAGS += -X $(GO_PROJECT)/internal/version.version=$(VERSION) GO_SUBDIRS += cmd internal apis pkg GO111MODULE = on -GOLANGCILINT_VERSION = 1.57.2 +GOLANGCILINT_VERSION = 1.58.2 GO_LINT_ARGS ?= "--fix" -include build/makelib/golang.mk From 481929791f0081651a47429f62a07f9bab049ed5 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 24 May 2024 12:40:49 +0100 Subject: [PATCH 225/370] chore: disable deprecated execinquery linter Signed-off-by: Philippe Scorsolini --- .golangci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.golangci.yml b/.golangci.yml index 7e46dbe35..8f9a9c6aa 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -15,6 +15,7 @@ linters: # These linters are all deprecated. We disable them explicitly to avoid the # linter logging deprecation warnings. - deadcode + - execinquery - varcheck - scopelint - structcheck From c5024e96efc12b79d073cf8734cb235d017d41c0 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 24 May 2024 12:41:09 +0100 Subject: [PATCH 226/370] chore: rename goerr113 to err113 Signed-off-by: Philippe Scorsolini --- .golangci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index 8f9a9c6aa..4fad6b97b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -58,7 +58,7 @@ linters: # 1 is covered by other linters. 2 is covered by wrapcheck, which can also # handle our use of crossplane-runtime's errors package. 3 is more strict # than we need. Not every error needs to be tested for equality. - - goerr113 + - err113 # These linters duplicate gocognit, but calculate complexity differently. - gocyclo From 1c224d1f33eac754e78a54e69eed037d374ad5bb Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 24 May 2024 12:41:40 +0100 Subject: [PATCH 227/370] chore: disable mnd linter Signed-off-by: Philippe Scorsolini --- .golangci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index 4fad6b97b..eace5eae5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -108,6 +108,10 @@ linters: # Go v1.22 and above: https://tip.golang.org/doc/go1.22 - exportloopref + # Warns about using magic numbers. We do think it's best to avoid magic + # numbers, but we should not be strict about it. 
+ - mnd + linters-settings: errcheck: # report about not checking of errors in type assetions: `a := b.(MyStruct)`; From 4909402bc9d647c86e8c53ebf5cc43c67f690020 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 24 May 2024 12:45:12 +0100 Subject: [PATCH 228/370] chore: update govet syntax to disable shadow Signed-off-by: Philippe Scorsolini --- .golangci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index eace5eae5..59360c6e3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -129,7 +129,8 @@ linters-settings: govet: # report about shadowed variables - check-shadowing: false + disable: + - shadow gofmt: # simplify code: gofmt with `-s` option, true by default From 8734b1ed15beadca0d223d761fd77b34f1a0dbe3 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 24 May 2024 12:46:08 +0100 Subject: [PATCH 229/370] chore: drop deprecated maligned linter Signed-off-by: Philippe Scorsolini --- .golangci.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 59360c6e3..a579c36d7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -146,10 +146,6 @@ linters-settings: - blank - dot - maligned: - # print struct with more effective memory layout or not, false by default - suggest-new: true - dupl: # tokens count to trigger issue, 150 by default threshold: 100 From 09dcbc23b8e66c1a7adfa6c63f4eee7951763010 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 24 May 2024 12:46:55 +0100 Subject: [PATCH 230/370] chore: update unparam deprecated config Signed-off-by: Philippe Scorsolini --- .golangci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index a579c36d7..a583905c8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -165,7 +165,8 @@ linters-settings: # XXX: if you enable this setting, unused will report a lot of false-positives in text editors: # if it's called for subdir of a project it can't find funcs usages. All text editor integrations # with golangci-lint call it on a directory with the changed file. - check-exported: false + exported-is-used: true + exported-fields-are-used: true unparam: # Inspect exported functions, default is false. Set to true if no external program/library imports your code. From fc66a6fb453b528ccc7afbacb3b799837e5c64ee Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 24 May 2024 12:47:16 +0100 Subject: [PATCH 231/370] chore: update deprecated max issues per linter syntax Signed-off-by: Philippe Scorsolini --- .golangci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index a583905c8..5d5d4c44c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -318,7 +318,7 @@ issues: new: false # Maximum issues count per one linter. Set to 0 to disable. Default is 50. - max-per-linter: 0 + max-issues-per-linter: 0 # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. max-same-issues: 0 From 8df2a823344b6dea82007442812b74b6d410dee3 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 24 May 2024 13:04:52 +0100 Subject: [PATCH 232/370] chore: remove unnecessary and deprecated errcheck.ignore Signed-off-by: Philippe Scorsolini --- .golangci.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 5d5d4c44c..09df62272 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -122,11 +122,6 @@ linters-settings: # default is false: such cases aren't reported by default. 
check-blank: false - # [deprecated] comma-separated list of pairs of the form pkg:regex - # the regex is used to ignore names within pkg. (default "fmt:.*"). - # see https://github.com/kisielk/errcheck#the-deprecated-method for details - ignore: fmt:.*,io/ioutil:^Read.* - govet: # report about shadowed variables disable: From e2bee102153b45220c56069f59bfc23008698d75 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 24 May 2024 13:05:28 +0100 Subject: [PATCH 233/370] chore: ignore few fatcontext linter errors Signed-off-by: Philippe Scorsolini --- test/e2e/funcs/env.go | 1 + test/e2e/funcs/feature.go | 1 + 2 files changed, 2 insertions(+) diff --git a/test/e2e/funcs/env.go b/test/e2e/funcs/env.go index 82b9e9f52..9fbe3c2b2 100644 --- a/test/e2e/funcs/env.go +++ b/test/e2e/funcs/env.go @@ -105,6 +105,7 @@ func EnvFuncs(fns ...env.Func) env.Func { return func(ctx context.Context, c *envconf.Config) (context.Context, error) { for _, fn := range fns { var err error + //nolint:fatcontext // We want to pass the context down the chain. ctx, err = fn(ctx, c) if err != nil { return ctx, err diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go index c84c9748e..e0cb39704 100644 --- a/test/e2e/funcs/feature.go +++ b/test/e2e/funcs/feature.go @@ -74,6 +74,7 @@ func AllOf(fns ...features.Func) features.Func { t.Helper() for _, fn := range fns { + //nolint:fatcontext // We want to pass the context to each function. ctx = fn(ctx, t, c) if t.Failed() && c.FailFast() { break From 7f4503e462df7036facb2e83c61beb666733b9f4 Mon Sep 17 00:00:00 2001 From: Helias Rodrigues Date: Fri, 19 Apr 2024 14:02:48 -0300 Subject: [PATCH 234/370] Update ADOPTERS.md Add CloudScript adopter. Signed-off-by: Helias Rodrigues --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index f492479b6..89861140c 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -83,6 +83,7 @@ This list is sorted in the order that organizations were added to it. | [Xata](https://xata.io) | [@mattfield](https://github.com/mattfield) [@paulaguijarro](https://github.com/paulaguijarro) | Crossplane manages the dev, staging, and production RDS Aurora PostgreSQL clusters for our [Dedicated Clusters](https://xata.io/blog/postgres-dedicated-clusters) offering, along with Flux Kustomizations and other resources that provision cells of internal [Xata](https://xata.io) services. | | [AlphaSense](https://www.alpha-sense.com/) | @abhihendre | Engineering teams at [AlphaSense](https://www.alpha-sense.com/) leverage Crossplane APIs, abstracted by a set of Helm charts and Compositions curated by our Platform Teams, to seamlessly provision cloud services across three major clouds, including our production environment.| | [UiPath](https://www.uipath.com/) | [@mjnovice](https://github.com/mjnovice) | Control plane for infrastructure management which powers [AutomationSuite](https://docs.uipath.com/automation-suite/automation-suite/2023.10/installation-guide-eks-aks/automation-suite-on-eksaks-overview) | +| [CloudScript](https://www.cloudscript.com.br/) | @xcloudscript | [CloudScript](https://www.cloudscript.com.br/) engineers have been using Crossplane since 2022 creating customized Compositions for the implementation of our engineering platform, basically automating the creation of Kubernetes environments on AWS, GCP and Azure ( coming soon).| | [SpareBank 1 Utvikling](https://sparebank1.dev/) | [@chlunde](https://github.com/chlunde) | Crossplane powers our Internal Developer Platform. 
It is utilized for day-to-day operations via GitOps and enabled us to execute a large-scale self-service migration of over a thousand production microservices, databases and caches from on-premises to EKS and managed AWS services. | | [Veset](https://veset.tv/) | [@pblgomez](https://github.com/pblgomez) | At Veset we are deploying all our backend resources in production environments to be managed by Crossplane. | | [Hyland Software](https://www.hyland.com/) | [@sethfduke](mailto:seth.duke@hyland.com) | Hyland is utilizing Crossplane in production environments to simplify the infrastructure provisioning process for internal development teams, providing a simple means of creating resources, while maintaining control over compliance, security, and best-practices of these resources through a suite of Compositions. | From c10a05b68ee28837b57960ca54cc0196b665df7b Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Fri, 24 May 2024 18:12:24 -0700 Subject: [PATCH 235/370] adopters: add Zuru Tech Italy to ADOPTERS.md Signed-off-by: Jared Watts --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 89861140c..0e6c0a91d 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -89,3 +89,4 @@ This list is sorted in the order that organizations were added to it. | [Hyland Software](https://www.hyland.com/) | [@sethfduke](mailto:seth.duke@hyland.com) | Hyland is utilizing Crossplane in production environments to simplify the infrastructure provisioning process for internal development teams, providing a simple means of creating resources, while maintaining control over compliance, security, and best-practices of these resources through a suite of Compositions. | | [Skillsoft](https://www.skillsoft.com/) | [@brandon-powers](https://github.com/brandon-powers) | At Skillsoft, Crossplane automates the provisioning and management of our AWS infrastructure (S3, Athena, and Glue) to support core Apache Kafka services powering our online learning platform, [Percipio](https://www.skillsoft.com/meet-skillsoft-percipio), in production environments. | | [Sopra Steria NO](https://www.soprasteria.no/) | [Eirik Holgernes](mailto:eirik.holgernes@soprasteria.com) | As a consultant agency, [Sopra Steria NO](https://www.soprasteria.no/) is leveraging the benefits of [Crossplane](https://www.crossplane.io/) to create self-service backends to increase speed and agility for the developers and engineers of our customers.
With the power of the compositions and composite resource definitions, the life cycle management of resources in [Kubernetes](https://kubernetes.io/) and deployment using GitOps tools like [Flux](https://fluxcd.io/) or [Argo CD](https://argoproj.github.io/cd/), our customers are taking giant strides into the future! | +| [Zuru Tech Italy](https://zuru.tech/) | [@nello1992](https://github.com/nello1992) | We currently use Crossplane in production environments to deploy workload clusters, with more use cases across the organization to come. | \ No newline at end of file From 406f2dfc51bd00ce3d00417d012ea8748263061f Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sat, 25 May 2024 01:22:33 +0000 Subject: [PATCH 236/370] fix(deps): update module github.com/golang-jwt/jwt/v5 to v5.2.1 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7f1bbb84f..4af315123 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/emicklei/dot v1.6.2 github.com/go-git/go-billy/v5 v5.5.0 github.com/go-git/go-git/v5 v5.11.0 - github.com/golang-jwt/jwt/v5 v5.2.0 + github.com/golang-jwt/jwt/v5 v5.2.1 github.com/google/go-cmp v0.6.0 github.com/google/go-containerregistry v0.19.0 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230919002926-dbcd01c402b2 diff --git a/go.sum b/go.sum index 3589a5c3e..cf5ddbfa2 100644 --- a/go.sum +++ b/go.sum @@ -217,8 +217,8 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= -github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= From 5e743d47a771b7bba13682ec289005779c3fc491 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sat, 25 May 2024 08:08:26 +0000 Subject: [PATCH 237/370] chore(deps): update aquasecurity/trivy-action action to v0.21.0 --- .github/workflows/ci.yml | 2 +- .github/workflows/scan.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c25a92aa4..c08633383 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -176,7 +176,7 @@ jobs: submodules: true - name: Run Trivy vulnerability scanner in fs mode - uses: aquasecurity/trivy-action@d710430a6722f083d3b36b8339ff66b32f22ee55 # 0.19.0 + uses: aquasecurity/trivy-action@fd25fed6972e341ff0007ddb61f77e88103953c2 # 0.21.0 with: scan-type: 'fs' ignore-unfixed: true diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index 13f7e3b3f..797f2e6a0 
100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -110,7 +110,7 @@ jobs: run: docker pull ${{ matrix.image }}:${{ env.tag }} - name: Run Trivy Vulnerability Scanner - uses: aquasecurity/trivy-action@d710430a6722f083d3b36b8339ff66b32f22ee55 # 0.19.0 + uses: aquasecurity/trivy-action@fd25fed6972e341ff0007ddb61f77e88103953c2 # 0.21.0 with: image-ref: ${{ matrix.image }}:${{ env.tag }} format: 'sarif' From 20999829cf53400328c9f61af9b1cb3afd41d0bc Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Sat, 25 May 2024 09:12:53 +0100 Subject: [PATCH 238/370] chore: ignore checklist for renovate prs Signed-off-by: Philippe Scorsolini --- .github/workflows/pr.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 868a497af..ce7671be5 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -6,10 +6,10 @@ on: jobs: checklist-completed: - if: github.actor != 'renovate[bot]' + if: github.actor != 'crossplane-renovate[bot]' runs-on: ubuntu-22.04 steps: - uses: mheap/require-checklist-action@01fe24747f8630a056d9ca79dfbbb755579850ab # v2 with: # The checklist must _exist_ and be filled out. - requireChecklist: true \ No newline at end of file + requireChecklist: true From 685ed5a3e3d1d6976dbb478de0c7e486d81d9ca0 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Sat, 25 May 2024 20:19:16 +0100 Subject: [PATCH 239/370] fix(trace): set default qps and burst back Signed-off-by: Philippe Scorsolini --- cmd/crank/beta/trace/trace.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cmd/crank/beta/trace/trace.go b/cmd/crank/beta/trace/trace.go index 943f6d732..98704c9c0 100644 --- a/cmd/crank/beta/trace/trace.go +++ b/cmd/crank/beta/trace/trace.go @@ -124,6 +124,19 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { if err != nil { return errors.Wrap(err, errKubeConfig) } + + // NOTE(phisco): We used to get them set as part of + // https://github.com/kubernetes-sigs/controller-runtime/blob/2e9781e9fc6054387cf0901c70db56f0b0a63083/pkg/client/config/config.go#L96, + // this new approach doesn't set them, so we need to set them here to avoid + // being utterly slow. + // TODO(phisco): make this configurable. + if kubeconfig.QPS == 0 { + kubeconfig.QPS = 20 + } + if kubeconfig.Burst == 0 { + kubeconfig.Burst = 30 + } + logger.Debug("Found kubeconfig") client, err := client.New(kubeconfig, client.Options{ From 65cb5015968647bc258b213d832ccfe3013e2468 Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Sat, 25 May 2024 20:57:24 -0700 Subject: [PATCH 240/370] build: allow CRD patch dir to already exist Signed-off-by: Jared Watts --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 90e5aa6cd..08c322816 100644 --- a/Makefile +++ b/Makefile @@ -86,7 +86,7 @@ CRD_PATCH_DIR = cluster/crd-patches # See patch files for details. 
crds.patch: $(KUBECTL) @$(INFO) patching generated CRDs - @mkdir $(WORK_DIR)/patch + @mkdir -p $(WORK_DIR)/patch @$(KUBECTL) patch --local --type=json -f $(CRD_DIR)/pkg.crossplane.io_deploymentruntimeconfigs.yaml --patch-file $(CRD_PATCH_DIR)/pkg.crossplane.io_deploymentruntimeconfigs.yaml -o yaml > $(WORK_DIR)/patch/pkg.crossplane.io_deploymentruntimeconfigs.yaml @mv $(WORK_DIR)/patch/pkg.crossplane.io_deploymentruntimeconfigs.yaml $(CRD_DIR)/pkg.crossplane.io_deploymentruntimeconfigs.yaml @$(OK) patched generated CRDs From 59d407205ba33c086d8e15f82af1762eb987d80a Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sun, 26 May 2024 08:08:36 +0000 Subject: [PATCH 241/370] chore(deps): update zeebe-io/backport-action action to v2.5.0 --- .github/workflows/backport.yml | 2 +- .github/workflows/commands.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 92ce498e4..ec43b1681 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -27,7 +27,7 @@ jobs: fetch-depth: 0 - name: Open Backport PR - uses: zeebe-io/backport-action@e8161d6a0dbfa2651b7daa76cbb75bc7c925bbf3 # v2.4.1 + uses: zeebe-io/backport-action@ef20d86abccbac3ee3a73cb2efbdc06344c390e5 # v2.5.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} github_workspace: ${{ github.workspace }} diff --git a/.github/workflows/commands.yml b/.github/workflows/commands.yml index e81e6b509..b7182a88b 100644 --- a/.github/workflows/commands.yml +++ b/.github/workflows/commands.yml @@ -26,7 +26,7 @@ jobs: fetch-depth: 0 - name: Open Backport PR - uses: zeebe-io/backport-action@e8161d6a0dbfa2651b7daa76cbb75bc7c925bbf3 # v2.4.1 + uses: zeebe-io/backport-action@ef20d86abccbac3ee3a73cb2efbdc06344c390e5 # v2.5.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} github_workspace: ${{ github.workspace }} From 10c5aeb0b0d4e21d492395d96a826b9d43093354 Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Mon, 27 May 2024 18:43:54 -0700 Subject: [PATCH 242/370] fix(deps): update module github.com/google/go-containerregistry to v0.19.1 Signed-off-by: Jared Watts --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4af315123..5d4abdd50 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/go-git/go-git/v5 v5.11.0 github.com/golang-jwt/jwt/v5 v5.2.1 github.com/google/go-cmp v0.6.0 - github.com/google/go-containerregistry v0.19.0 + github.com/google/go-containerregistry v0.19.1 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230919002926-dbcd01c402b2 github.com/jmattheis/goverter v1.3.2 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index cf5ddbfa2..a9fce6aea 100644 --- a/go.sum +++ b/go.sum @@ -252,8 +252,8 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic= -github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY= +github.com/google/go-containerregistry 
v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230919002926-dbcd01c402b2 h1:ChuUQ1y5Vf+Eev+UgEed/ljibTIcWY7mYPtWYLK7fxU=
 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230919002926-dbcd01c402b2/go.mod h1:Ek+8PQrShkA7aHEj3/zSW33wU0V/Bx3zW/gFh7l21xY=
 github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa h1:+MG+Q2Q7mtW6kCIbUPZ9ZMrj7xOWDKI1hhy1qp0ygI0=

From 6de28f9a2ca350019cc13d7d6dcbfed63ea8ffd8 Mon Sep 17 00:00:00 2001
From: Nic Cope
Date: Tue, 21 May 2024 17:40:59 -0700
Subject: [PATCH 243/370] Add a one-pager proposing we adopt Earthly

Signed-off-by: Nic Cope
---
 design/one-pager-build-with-earthly.md | 249 +++++++++++++++++++++++++
 1 file changed, 249 insertions(+)
 create mode 100644 design/one-pager-build-with-earthly.md

diff --git a/design/one-pager-build-with-earthly.md b/design/one-pager-build-with-earthly.md
new file mode 100644
index 000000000..a2bc4ecc4
--- /dev/null
+++ b/design/one-pager-build-with-earthly.md
@@ -0,0 +1,249 @@
+# Build with Earthly
+
+* Owner: Nic Cope (@negz)
+* Status: Proposed
+
+## Background
+
+Crossplane uses a `Makefile` with several targets, like `make build`, to
+automate tasks that developers frequently need to run when developing
+Crossplane.
+
+Crossplane also uses GitHub Actions for continuous integration (CI), to validate
+pull requests. Most of Crossplane's GitHub Actions workflows run the same Make
+targets. This creates some consistency between local development and CI. For
+example `make test` should have the same result whether run locally or in CI.
+
+The `Makefile` includes a moderate library of other `Makefiles`. These are
+imported from the `build/makelib` directory. The `build` directory is a Git
+submodule. Its source is https://github.com/crossplane/build. Most maintainers
+call it "the build submodule".
+
+Crossplane uses the build submodule to:
+
+- Install pinned versions of common tools (`helm`, `kubectl`, etc)
+- Cross-compile Crossplane for several platforms
+- Produce a multi-platform OCI image for Crossplane
+- Run code generation - e.g. `go generate`
+- Validate code by running linters, unit tests, and end-to-end (E2E) tests
+- Automatically derive the semantic version of artifacts from git tags
+- Publish OCI image artifacts to OCI registries
+- Publish binary and Helm chart artifacts to AWS S3
+- Promote artifacts to different distribution channels (i.e. tags, S3 dirs)
+
+The build submodule is also used by Crossplane extensions, like Providers.
+Providers use the build submodule to do more than core Crossplane - for example
+they use it to spin up `kind` clusters and deploy Crossplane for testing.
+
+In the 5+ years I've been a Crossplane maintainer, almost every new maintainer
+(including myself) has expressed a dislike for the build submodule and a desire
+to change build tooling.
+
+I believe folks dislike the build submodule because:
+
+- Make, as a language, has a high learning curve
+- Few people have prior experience with advanced use of Make
+- Needing to update a shared git submodule slows down changes to build logic
+
+It's worth noting that builds using the submodule aren't fully hermetic. It
+strives to be hermetic: for example it uses pinned versions of tools like `helm`
+and uses a per-project Go module cache. However it doesn't manage the Go
+toolchain, and uses the global Go build cache. I've never heard anyone complain
+about this, but it's an area that could be improved.
+
+## Proposal
+
+I propose we switch from Make to https://earthly.dev.
+
+Earthly targets the 'glue' layer between language-specific tools like `go` and
+CI systems like GitHub Actions. In Crossplane, Earthly would replace Make and
+Docker. It's based on Docker's [BuildKit][buildkit], so all builds are
+containerized and hermetic.
+
+### Configuration
+
+The Earthly equivalent of a `Makefile` is an `Earthfile`. An `Earthfile` is a
+lot like a `Dockerfile`, but with Make-like targets:
+
+```Dockerfile
+VERSION 0.8
+FROM golang:1.22
+WORKDIR /go-workdir
+
+deps:
+    COPY go.mod go.sum ./
+    RUN go mod download
+    # Output these back in case go mod download changes them.
+    SAVE ARTIFACT go.mod AS LOCAL go.mod
+    SAVE ARTIFACT go.sum AS LOCAL go.sum
+
+build:
+    FROM +deps
+    COPY main.go .
+    RUN go build -o output/example main.go
+    SAVE ARTIFACT output/example AS LOCAL local-output/go-example
+
+docker:
+    COPY +build/example .
+    ENTRYPOINT ["/go-workdir/example"]
+    SAVE IMAGE go-example:latest
+```
+
+You'd run `earthly +docker` to build the Docker target in this example.
+
+At first glance Earthly looks very similar to a multi-stage Dockerfile. There's
+a lot of overlap, but Earthly has a bunch of extra functionality that's useful
+for a general purpose build tool, including:
+
+* Invoking other Dockerized things ([`WITH DOCKER`][earthfile-with-docker]) -
+  e.g. Crossplane's E2E tests
+* Exporting files that changed in the build
+  ([`SAVE ARTIFACT AS LOCAL`][earthfile-save-artifact])
+* Targets that are simply aliases for a bunch of other targets.
+* The ability to import Earthfiles from other repos without a submodule
+  ([`IMPORT`][earthfile-import]).
+
+I feel Earthly's key strength is its Dockerfile-like syntax. Before writing this
+one-pager I ported 90% of Crossplane's build from Make to Earthly. I found it
+much easier to pick up and iterate on than the build submodule.
+
+### Performance
+
+Earthly is as fast as Make when developing locally, but a little slower in CI.
+CI is slower because the Go build cache doesn't persist across CI runs.
+
+Here are a few local development comparisons using a Linux VM with 6 Apple M1
+Max vCPUs and 20GiB of memory.
+
+| Task | Make | Earthly |
+| --- | --- | --- |
+| Build with a cold cache | ~46 seconds | ~60 seconds |
+| Build with a hot cache (no changes) | ~2 seconds | ~1 second |
+| Build with a hot cache (one Go file changed) | ~8 seconds | ~8 seconds |
+| Build for all platforms with a cold cache | ~4 minutes 10 seconds | ~4 minutes 40 seconds |
+| Build for all platforms with a hot cache (one Go file changed) | ~42 seconds | ~32 seconds |
+
+Here are some CI comparisons run on GitHub Actions standard workers.
+
+| Task | Make | Earthly |
+| --- | --- | --- |
+| Run linters | ~3 minutes | ~4 minutes |
+| Run unit tests | ~3 minutes | ~2.5 minutes |
+| Publish artifacts | ~12 minutes | ~14 minutes |
+| Run E2E tests | ~12 minutes | ~14 minutes |
+
+Earthly uses caching to run containerized builds as fast as Make's
+"native" builds. For Crossplane this primarily means two things:
+
+* It caches Go modules, and will only redownload them if `go.mod` changes.
+* It stores the Go build cache in a cache volume that's reused across builds.
+
+This caching requires the BuildKit cache to persist across runs. The BuildKit
+cache doesn't persist across GitHub Actions runs, because every job runs in a
+clean runner environment.
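For illustration, a minimal `Earthfile` sketch of the two caching mechanisms described above might look like the following. This is an illustrative sketch assuming a conventional Go project layout; the target name and paths are placeholders, not Crossplane's actual Earthfile.

```Dockerfile
VERSION 0.8
FROM golang:1.22
WORKDIR /src

go-build:
    # Layer caching: these layers only re-run when go.mod or go.sum change,
    # so Go modules aren't redownloaded on every source change.
    COPY go.mod go.sum ./
    RUN go mod download
    # Cache volume: the Go build cache lives in a BuildKit cache volume that
    # is reused across builds, for as long as the local BuildKit cache persists.
    CACHE /root/.cache/go-build
    COPY . .
    RUN go build ./...
```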
+
+Crossplane's Make-based GitHub Actions use the [cache] GitHub Action to save the
+Go module cache and build cache after each run, and load it before the next.
+There's no good way to do this in Earthly today, per
+https://github.com/earthly/earthly/issues/1540.
+
+Earthly's recommended approach to caching in CI is to use their Earthly
+Satellite remote runners, or host your own remote BuildKit that persists across
+runs. Neither are good fits for Crossplane. Satellites are a paid product, and
+hosting BuildKit would mean paying for and operating build infrastructure.
+
+Earthly supports 'remote caching' of build layers in an OCI registry, but this
+doesn't include `CACHE` volumes (i.e. the Go build cache). Typically CI is
+configured to push the cache using the `--max-remote-cache` flag on main builds,
+then PR builds use the `--remote-cache` flag to load the cache.
+
+My testing indicates remote caching would have little impact for our builds. For
+example building Crossplane for all platforms, with one changed Go file, a cold
+local cache, and a hot remote cache was only a second faster than building with
+a cold cache. This is because the difference is mostly whether Go modules are
+downloaded from the Go module proxy via `go mod download` or downloaded from an
+OCI registry as a cached layer. It's possible GitHub Actions caching to GitHub
+Container Registry would have a more significant impact on build times.
+
+## Risks
+
+Earthly is an early product, currently at v0.8.11. In my testing it's been
+mostly stable, though I've had to restart BuildKit a small handful of times due
+to errors like https://github.com/earthly/earthly/issues/2454.
+
+Earthly also appears to be owned and primarily staffed by a single vendor, who
+presumably would like to build a business around it. This could create conflicts
+of interest - for example Earthly probably isn't incentivised to make CI caching
+better given they're selling a CI caching solution (Satellites). It's worth
+noting that Earthly switched _from_ BSL to MPL already.
+
+## Alternatives Considered
+
+I considered the following alternatives.
+
+### Make, but simpler
+
+Make isn't so bad when you only have a small handful of really simple targets.
+In theory, this could be a nice alternative - strip everything down into one
+streamlined `Makefile`.
+
+Unfortunately I don't think there's much in `makelib` that we can eliminate to
+achieve this. The functionality (pinning tools, building for multiple platforms,
+etc) has to be implemented somewhere.
+
+### Multistage docker builds
+
+This is the closest alternative to Earthly. It has the notable advantage that
+Docker is able to leverage bind mounts and/or [native GitHub Actions cache
+support][docker-actions-cache] to cache the Go build cache across runs.
+
+The main reason to avoid this route is that Docker doesn't make a great general
+purpose build tool. For example there's no great way to invoke our (`kind`
+based) E2E tests, or even output build artifacts. Earthly makes this point
+pretty well in [this article][earthly-repeatable-builds].
+
+### Dagger
+
+[Dagger][dagger] is architecturally similar to Earthly, in that it's built on
+BuildKit and all builds are containerized. It differs significantly in how you
+configure your build.
+
+In Dagger, you install one or more Dagger Functions. You then invoke these
+Functions via the `dagger` CLI. There's no equivalent of a `Makefile` or an
+`Earthfile` - if you need to string multiple functions together you write a new
+function that calls them, and call that function.
+
+The result is you end up defining your build logic in a language like Go, for
+example:
+
+* https://docs.dagger.io/quickstart/822194/daggerize
+* https://docs.dagger.io/quickstart/428201/custom-function
+
+I could see this becoming useful if our build logic became _really_ complex, but
+in most cases I prefer the simpler `Earthfile` syntax.
+
+### Bazel and friends
+
+[Bazel][bazel] and similar Google-Blaze-inspired tools like Pants and Buck focus
+on fast, correct builds. They're especially well suited to large monorepos using
+multiple languages, where building the entire monorepo for every change isn't
+feasible. Bazel uses `BUILD` files with rules written in Starlark, a Pythonic
+language.
+
+Bazel doesn't wrap tools like `go`, but completely replaces them. It's not
+compatible with Go modules for example, and instead offers tools like `gazelle`
+to generate a `BUILD` file from a module-based third party dependency.
+
+Bazel has a pretty large learning curve and tends to require a lot of care and
+feeding to keep its `BUILD` files up-to-date. I don't feel it's a great fit for
+a medium sized, single language, manyrepo project like Crossplane.
+
+[buildkit]: https://github.com/moby/buildkit
+[earthfile-with-docker]: https://docs.earthly.dev/docs/earthfile#with-docker
+[earthfile-save-artifact]: https://docs.earthly.dev/docs/earthfile#save-artifact
+[earthfile-import]: https://docs.earthly.dev/docs/earthfile#import
+[cache]: https://github.com/actions/cache
+[docker-actions-cache]: https://docs.docker.com/build/cache/backends/gha/
+[earthly-repeatable-builds]: https://earthly.dev/blog/repeatable-builds-every-time/
+[dagger]: https://dagger.io
+[bazel]: https://bazel.build
\ No newline at end of file

From e7a7002d5532ad8058bfcc5c3c95f62bd5d2ce1d Mon Sep 17 00:00:00 2001
From: Nic Cope
Date: Wed, 22 May 2024 18:23:24 -0700
Subject: [PATCH 244/370] Upload Trivy results to GitHub

Signed-off-by: Nic Cope
---
 .github/workflows/ci.yml | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c08633383..52efffdf6 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -182,8 +182,14 @@ jobs:
           ignore-unfixed: true
           skip-dirs: design
           scan-ref: '.'
-          exit-code: '1'
           severity: 'CRITICAL,HIGH'
+          format: sarif
+          output: 'trivy-results.sarif'
+
+      - name: Upload Trivy Results to GitHub
+        uses: github/codeql-action/upload-sarif@v3
+        with:
+          sarif_file: 'trivy-results.sarif'
 
   unit-tests:
     runs-on: ubuntu-22.04

From 7b63b19543f06f3ee133be2fed4c9707cb6b8ffd8 Mon Sep 17 00:00:00 2001
From: Nic Cope
Date: Thu, 23 May 2024 17:25:40 -0700
Subject: [PATCH 245/370] Give Buf a GitHub token

So that it stops warning about it.

Signed-off-by: Nic Cope
---
 .github/workflows/ci.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 52efffdf6..f410ed7c2 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -470,8 +470,6 @@ jobs:
          name: artifacts
          path: ./out/artifacts
 
-  # TODO(negz): Refactor this job. Should the parts pertaining to release
-  # branches live in promote.yaml instead?
protobuf-schemas: runs-on: ubuntu-22.04 needs: detect-noop @@ -483,6 +481,8 @@ jobs: - name: Setup Buf uses: bufbuild/buf-setup-action@v1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - name: Lint Protocol Buffers uses: bufbuild/buf-lint-action@v1 From fdc79723225e3ca45c6a9f56785376dba3aa2e04 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Fri, 24 May 2024 22:03:28 -0700 Subject: [PATCH 246/370] Format Renovate config The current format uses }, { style, which I find makes it much more laborious to edit, especially when I want to quickly copy and modify a block. Signed-off-by: Nic Cope --- .github/renovate.json5 | 137 +++++++++++++++++++++++++++++------------ 1 file changed, 96 insertions(+), 41 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 8440ced47..de2d7118e 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -5,36 +5,51 @@ "helpers:pinGitHubActionDigests", ":semanticCommits" ], -// We only want renovate to rebase PRs when they have conflicts, -// default "auto" mode is not required. + // We only want renovate to rebase PRs when they have conflicts, default + // "auto" mode is not required. "rebaseWhen": "conflicted", -// The maximum number of PRs to be created in parallel + // The maximum number of PRs to be created in parallel "prConcurrentLimit": 5, -// The branches renovate should target -// PLEASE UPDATE THIS WHEN RELEASING. - "baseBranches": ["master","release-1.14","release-1.15", "release-1.16"], + // The branches renovate should target + // PLEASE UPDATE THIS WHEN RELEASING. + "baseBranches": [ + "master", + "release-1.14", + "release-1.15", + "release-1.16" + ], "ignorePaths": [ "design/**", // We test upgrades, so leave it on an older version on purpose. "test/e2e/manifests/pkg/provider/provider-initial.yaml", ], - "postUpdateOptions": ["gomodTidy"], -// All PRs should have a label - "labels": ["automated"], + "postUpdateOptions": [ + "gomodTidy" + ], + // All PRs should have a label + "labels": [ + "automated" + ], "customManagers": [ { "customType": "regex", "description": "Bump Go version used in workflows", - "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$"], + "fileMatch": [ + "^\\.github\\/workflows\\/[^/]+\\.ya?ml$" + ], "matchStrings": [ "GO_VERSION: '(?.*?)'\\n" ], "datasourceTemplate": "golang-version", "depNameTemplate": "golang" - }, { + }, + { "customType": "regex", "description": "Bump golangci-lint version in workflows and the Makefile", - "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$","^Makefile$"], + "fileMatch": [ + "^\\.github\\/workflows\\/[^/]+\\.ya?ml$", + "^Makefile$" + ], "matchStrings": [ "GOLANGCI_VERSION: 'v(?.*?)'\\n", "GOLANGCILINT_VERSION = (?.*?)\\n" @@ -42,19 +57,25 @@ "datasourceTemplate": "github-tags", "depNameTemplate": "golangci/golangci-lint", "extractVersionTemplate": "^v(?.*)$" - }, { + }, + { "customType": "regex", "description": "Bump helm version in the Makefile", - "fileMatch": ["^Makefile$"], + "fileMatch": [ + "^Makefile$" + ], "matchStrings": [ "HELM3_VERSION = (?.*?)\\n" ], "datasourceTemplate": "github-tags", "depNameTemplate": "helm/helm", - }, { + }, + { "customType": "regex", "description": "Bump kind version in the Makefile", - "fileMatch": ["^Makefile$"], + "fileMatch": [ + "^Makefile$" + ], "matchStrings": [ "KIND_VERSION = (?.*?)\\n" ], @@ -63,19 +84,23 @@ } ], "crossplane": { - "fileMatch": ["(^|/)test/e2e/.*\\.ya?ml$"] + "fileMatch": [ + "(^|/)test/e2e/.*\\.ya?ml$" + ] }, -// PackageRules disabled below should be enabled in case of vulnerabilities + 
// PackageRules disabled below should be enabled in case of vulnerabilities "vulnerabilityAlerts": { "enabled": true }, "osvVulnerabilityAlerts": true, -// Renovate evaluates all packageRules in order, so low priority rules should -// be at the beginning, high priority at the end + // Renovate evaluates all packageRules in order, so low priority rules should + // be at the beginning, high priority at the end "packageRules": [ { "description": "Generate code after upgrading go dependencies", - "matchDatasources": ["go"], + "matchDatasources": [ + "go" + ], postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. "commands": [ @@ -91,7 +116,9 @@ }, { "description": "Lint code after upgrading golangci-lint", - "matchDepNames": ["golangci/golangci-lint"], + "matchDepNames": [ + "golangci/golangci-lint" + ], postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. "commands": [ @@ -106,24 +133,42 @@ }, }, { - "matchManagers": ["crossplane"], - "matchFileNames": ["test/e2e/**"], + "matchManagers": [ + "crossplane" + ], + "matchFileNames": [ + "test/e2e/**" + ], "groupName": "e2e-manifests", - }, { + }, + { "description": "Ignore non-security related updates to release branches", - matchBaseBranches: [ "/^release-.*/"], + matchBaseBranches: [ + "/^release-.*/" + ], enabled: false, - }, { + }, + { "description": "Still update Docker images on release branches though", - "matchDatasources": ["docker"], - matchBaseBranches: [ "/^release-.*/"], + "matchDatasources": [ + "docker" + ], + matchBaseBranches: [ + "/^release-.*/" + ], enabled: true, - }, { + }, + { "description": "Only get Docker image updates every 2 weeks to reduce noise", - "matchDatasources": ["docker"], - "schedule": ["every 2 week on monday"], + "matchDatasources": [ + "docker" + ], + "schedule": [ + "every 2 week on monday" + ], enabled: true, - }, { + }, + { "description": "Ignore k8s.io/client-go older versions, they switched to semantic version and old tags are still available in the repo", "matchDatasources": [ "go" @@ -132,7 +177,8 @@ "k8s.io/client-go" ], "allowedVersions": "<1.0", - }, { + }, + { "description": "Ignore k8s dependencies, should be updated on crossplane-runtime", "matchDatasources": [ "go" @@ -142,17 +188,23 @@ "sigs.k8s.io" ], "enabled": false, - },{ + }, + { "description": "Only get dependency digest updates every month to reduce noise, except crossplane-runtime", - "excludePackageNames": ["github.com/crossplane/crossplane-runtime"], + "excludePackageNames": [ + "github.com/crossplane/crossplane-runtime" + ], "matchDatasources": [ "go" ], "matchUpdateTypes": [ "digest", ], - "extends": ["schedule:monthly"], - }, { + "extends": [ + "schedule:monthly" + ], + }, + { "description": "Ignore oss-fuzz, it's not using tags, we'll stick to master", "matchDepTypes": [ "action" @@ -161,10 +213,13 @@ "google/oss-fuzz" ], "enabled": false - }, { + }, + { "description": "Group all go version updates", - "matchDatasources": ["golang-version"], + "matchDatasources": [ + "golang-version" + ], "groupName": "golang version", } ], -} +} \ No newline at end of file From 3093e1e482ff6d1d4cb21cb0bc4bb6e1248161e4 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 16 May 2024 23:59:03 -0700 Subject: [PATCH 247/370] Replace Makefile with Earthfile Signed-off-by: Nic Cope --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- .github/renovate.json5 | 73 ++++- .github/workflows/ci.yml | 376 +++++++---------------- .github/workflows/promote.yml | 49 ++- .gitignore | 1 + 
.gitmodules | 3 - Earthfile | 399 +++++++++++++++++++++++++ Makefile | 202 ------------- build | 1 - cluster/images/crossplane/Dockerfile | 11 - cluster/images/crossplane/Makefile | 37 --- cluster/local/README.md | 6 - cluster/local/kind.sh | 103 ------- contributing/README.md | 88 ++---- design/one-pager-build-with-earthly.md | 10 +- 15 files changed, 621 insertions(+), 740 deletions(-) create mode 100644 Earthfile delete mode 100644 Makefile delete mode 160000 build delete mode 100644 cluster/images/crossplane/Dockerfile delete mode 100755 cluster/images/crossplane/Makefile delete mode 100644 cluster/local/README.md delete mode 100755 cluster/local/kind.sh diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 2da85f389..6f84d7d11 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -19,7 +19,7 @@ Fixes # I have: - [ ] Read and followed Crossplane's [contribution process]. -- [ ] Run `make reviewable` to ensure this PR is ready for review. +- [ ] Run `earthly +reviewable` to ensure this PR is ready for review. - [ ] Added or updated unit tests. - [ ] Added or updated e2e tests. - [ ] Linked a PR or a [docs tracking issue] to [document this change]. diff --git a/.github/renovate.json5 b/.github/renovate.json5 index de2d7118e..f32072250 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -33,26 +33,24 @@ "customManagers": [ { "customType": "regex", - "description": "Bump Go version used in workflows", + "description": "Bump Go version in Earthfile", "fileMatch": [ - "^\\.github\\/workflows\\/[^/]+\\.ya?ml$" + "^Earthfile$" ], "matchStrings": [ - "GO_VERSION: '(?.*?)'\\n" + "ARG --global GO_VERSION=(?.*?)\\n" ], "datasourceTemplate": "golang-version", "depNameTemplate": "golang" }, { "customType": "regex", - "description": "Bump golangci-lint version in workflows and the Makefile", + "description": "Bump golangci-lint version in workflows and the Earthfile", "fileMatch": [ - "^\\.github\\/workflows\\/[^/]+\\.ya?ml$", - "^Makefile$" + "^Earthfile$" ], "matchStrings": [ - "GOLANGCI_VERSION: 'v(?.*?)'\\n", - "GOLANGCILINT_VERSION = (?.*?)\\n" + "ARG GOLANGCI_LINT_VERSION=v(?.*?)\\n" ], "datasourceTemplate": "github-tags", "depNameTemplate": "golangci/golangci-lint", @@ -60,29 +58,73 @@ }, { "customType": "regex", - "description": "Bump helm version in the Makefile", + "description": "Bump helm version in the Earthfile", "fileMatch": [ - "^Makefile$" + "^Earthfile$" ], "matchStrings": [ - "HELM3_VERSION = (?.*?)\\n" + "ARG HELM_VERSION=v(?.*?)\\n" ], "datasourceTemplate": "github-tags", "depNameTemplate": "helm/helm", }, { "customType": "regex", - "description": "Bump kind version in the Makefile", + "description": "Bump helm-docs version in the Earthfile", "fileMatch": [ - "^Makefile$" + "^Earthfile$" ], "matchStrings": [ - "KIND_VERSION = (?.*?)\\n" + "ARG HELM_DOCS_VERSION=(?.*?)\\n" + ], + "datasourceTemplate": "github-tags", + "depNameTemplate": "norwoodj/helm-docs", + }, + { + "customType": "regex", + "description": "Bump kind version in the Earthfile", + "fileMatch": [ + "^Earthfile$" + ], + "matchStrings": [ + "ARG KIND_VERSION=v(?.*?)\\n" ], "datasourceTemplate": "github-tags", "depNameTemplate": "kubernetes-sigs/kind", - } + }, + { + "customType": "regex", + "description": "Bump gotestsum version in the Earthfile", + "fileMatch": [ + "^Earthfile$" + ], + "matchStrings": [ + "ARG GOTESTSUM_VERSION=(?.*?)\\n" + ], + "datasourceTemplate": "github-tags", + "depNameTemplate": "gotestyourself/gotestsum", + }, 
+ { + "customType": "regex", + "description": "Bump codeql version in the Earthfile", + "fileMatch": [ + "^Earthfile$" + ], + "matchStrings": [ + "ARG CODEQL_VERSION=v(?.*?)\\n" + ], + "datasourceTemplate": "github-tags", + "depNameTemplate": "github/codeql-action", + }, ], + // Renovate doesn't have native Earthfile support, but because Earthfile + // syntax is a superset of Dockerfile syntax this works to update FROM images. + // https://github.com/renovatebot/renovate/issues/15975 + "dockerfile": { + "fileMatch:" [ + "(^|/)Earthfile$" + ] + }, "crossplane": { "fileMatch": [ "(^|/)test/e2e/.*\\.ya?ml$" @@ -122,7 +164,6 @@ postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. "commands": [ - "git submodule update --init", "install-tool golang $(grep -oP \"^toolchain go\\K.+\" go.mod)", "make go.lint", ], diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f410ed7c2..47778f848 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,9 +10,10 @@ on: env: # Common versions - GO_VERSION: '1.22.3' - GOLANGCI_VERSION: 'v1.58.2' - DOCKER_BUILDX_VERSION: 'v0.10.0' + EARTHLY_VERSION: '0.8.11' + + # Force Earthly to use color output + FORCE_COLOR: "1" # Common users. We can't run a step 'if secrets.AWS_USR != ""' but we can run # a step 'if env.AWS_USR' != ""', so we copy these to succinctly test whether @@ -21,6 +22,7 @@ env: AWS_USR: ${{ secrets.AWS_USR }} UPBOUND_MARKETPLACE_PUSH_ROBOT_USR: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} + jobs: check-diff: runs-on: ubuntu-22.04 @@ -28,152 +30,90 @@ jobs: steps: - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - with: - submodules: true - - name: Setup Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - go-version: ${{ env.GO_VERSION }} - - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Cache the Go Build Cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 - with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-check-diff-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-check-diff- - - - name: Cache Go Dependencies - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 + - name: Login to DockerHub + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + if: env.DOCKER_USR != '' with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-pkg- + username: ${{ secrets.DOCKER_USR }} + password: ${{ secrets.DOCKER_PSW }} - - name: Download Go Modules - run: make modules.download modules.check + - name: Generate Files + run: earthly --strict +generate - - name: Check Diff - run: make check-diff + - name: Count Changed Files + id: changed_files + run: echo "count=$(git status --porcelain | wc -l)" >> $GITHUB_OUTPUT - detect-noop: - runs-on: ubuntu-22.04 - outputs: - noop: ${{ steps.noop.outputs.should_skip }} - steps: - - name: Detect No-op Changes - id: noop - uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1 + - name: Fail if Files Changed + if: steps.changed_files.outputs.count != 0 + uses: actions/github-script@v7 with: - github_token: ${{ secrets.GITHUB_TOKEN }} - paths_ignore: '["**.md", "**.png", "**.jpg"]' - 
do_not_skip: '["workflow_dispatch", "schedule", "push"]' - concurrent_skipping: false + script: core.setFailed('Found changed files after running earthly +generate.'') lint: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - with: - submodules: true - - - name: Setup Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 - with: - go-version: ${{ env.GO_VERSION }} - - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - - name: Cache the Go Build Cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-lint-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-lint- + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Cache Go Dependencies - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 + - name: Login to DockerHub + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + if: env.DOCKER_USR != '' with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-pkg- - - - name: Download Go Modules - run: make modules.download modules.check + username: ${{ secrets.DOCKER_USR }} + password: ${{ secrets.DOCKER_PSW }} - # We could run 'make lint' to ensure our desired Go version, but we prefer - # this action because it leaves 'annotations' (i.e. it comments on PRs to - # point out linter violations). - name: Lint - uses: golangci/golangci-lint-action@d6238b002a20823d52840fda27e2d4891c5952dc # v4 - with: - version: ${{ env.GOLANGCI_VERSION }} - skip-cache: true # We do our own caching. 
+ run: earthly --strict +lint codeql: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - with: - submodules: true - - - name: Setup Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 - with: - go-version: ${{ env.GO_VERSION }} - - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - - name: Cache the Go Build Cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-check-diff-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-check-diff- + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Cache Go Dependencies - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 + - name: Login to DockerHub + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + if: env.DOCKER_USR != '' with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-pkg- + username: ${{ secrets.DOCKER_USR }} + password: ${{ secrets.DOCKER_PSW }} - - name: Download Go Modules - run: make modules.download modules.check + - name: Run CodeQL + run: earthly --strict +ci-codeql - - name: Initialize CodeQL - uses: github/codeql-action/init@9fdb3e49720b44c48891d036bb502feb25684276 # v3 + - name: Upload CodeQL Results to GitHub + uses: github/codeql-action/upload-sarif@v3 with: - languages: go + sarif_file: '_output/codeql/go.sarif' - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@9fdb3e49720b44c48891d036bb502feb25684276 # v3 trivy-scan-fs: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - with: - submodules: true - name: Run Trivy vulnerability scanner in fs mode uses: aquasecurity/trivy-action@fd25fed6972e341ff0007ddb61f77e88103953c2 # 0.21.0 @@ -193,58 +133,36 @@ jobs: unit-tests: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - with: - submodules: true - - - name: Fetch History - run: git fetch --prune --unshallow - - name: Setup Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - go-version: ${{ env.GO_VERSION }} + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - - - name: Cache the Go Build Cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 - with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-unit-tests-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-unit-tests- - - - name: Cache Go Dependencies - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 + - name: Login to DockerHub + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + if: env.DOCKER_USR != '' with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-pkg- - - - name: Download Go Modules - run: make modules.download 
modules.check + username: ${{ secrets.DOCKER_USR }} + password: ${{ secrets.DOCKER_PSW }} - name: Run Unit Tests - run: make -j2 test + run: earthly --strict +test - name: Publish Unit Test Coverage uses: codecov/codecov-action@125fc84a9a348dbcf27191600683ec096ec9021c # v4 with: flags: unittests - file: _output/tests/linux_amd64/coverage.txt + file: _output/tests/coverage.txt token: ${{ secrets.CODECOV_TOKEN }} e2e-tests: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' strategy: fail-fast: false matrix: @@ -256,71 +174,25 @@ jobs: - realtime-compositions steps: - - name: Setup QEMU - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3 - with: - platforms: all - - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3 - with: - version: ${{ env.DOCKER_BUILDX_VERSION }} - install: true - - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - with: - submodules: true - - name: Fetch History - run: git fetch --prune --unshallow - - - name: Setup Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - go-version: ${{ env.GO_VERSION }} - - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Cache the Go Build Cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 - with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-e2e-tests-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-e2e-tests- - - - name: Cache Go Dependencies - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 + - name: Login to DockerHub + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + if: env.DOCKER_USR != '' with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-pkg- - - - name: Download Go Modules - run: make modules.download modules.check - - - name: Build Helm Chart - run: make -j2 build - env: - # We're using docker buildx, which doesn't actually load the images it - # builds by default. Specifying --load does so. 
- BUILD_ARGS: "--load" + username: ${{ secrets.DOCKER_USR }} + password: ${{ secrets.DOCKER_PSW }} - name: Run E2E Tests - run: make e2e E2E_TEST_FLAGS="-test.v -test.failfast -fail-fast --kind-logs-location ./logs-kind --test-suite ${{ matrix.test-suite }}" + run: earthly --strict --allow-privileged +e2e --FLAGS="-test.failfast -fail-fast --test-suite ${{ matrix.test-suite }}" - - name: Upload artifacts - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 - if: failure() - with: - name: e2e-kind-logs-${{ matrix.test-suite }} - path: ./logs-kind - if-no-files-found: error - retention-days: 7 - - name: Publish E2E Test Flakes if: '!cancelled()' uses: buildpulse/buildpulse-action@d0d30f53585cf16b2e01811a5a753fd47968654a # v0.11.0 @@ -329,12 +201,10 @@ jobs: repository: 147886080 key: ${{ secrets.BUILDPULSE_ACCESS_KEY_ID }} secret: ${{ secrets.BUILDPULSE_SECRET_ACCESS_KEY }} - path: _output/tests/linux_amd64/e2e-tests.xml + path: _output/tests/e2e-tests.xml publish-artifacts: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Cleanup Disk @@ -344,66 +214,23 @@ jobs: dotnet: true haskell: true tool-cache: true - large-packages: false swap-storage: false - - - name: Setup QEMU - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3 - with: - platforms: all - - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3 - with: - version: ${{ env.DOCKER_BUILDX_VERSION }} - install: true + # This works, and saves ~5GiB, but takes ~2 minutes to do it. + large-packages: false + # TODO(negz): Does having these around avoid Earthly needing to pull + # large images like golang? + docker-images: false - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: - submodules: true - - - name: Fetch History - run: git fetch --prune --unshallow - - - name: Setup Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 - with: - go-version: ${{ env.GO_VERSION }} + fetch-depth: 0 - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - - - name: Cache the Go Build Cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 - with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-publish-artifacts-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-publish-artifacts- - - - name: Cache Go Dependencies - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-pkg- - - - name: Download Go Modules - run: make modules.download modules.check - - - name: Build Artifacts - run: make -j2 build.all - env: - # We're using docker buildx, which doesn't actually load the images it - # builds by default. Specifying --load does so. 
- BUILD_ARGS: "--load" - - - name: Publish Artifacts to GitHub - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 - with: - name: output - path: _output/** + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - name: Login to DockerHub uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 @@ -420,31 +247,40 @@ jobs: username: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} password: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_PSW }} - - name: Publish Artifacts to S3, Marketplace, DockerHub - run: make -j2 publish BRANCH_NAME=${GITHUB_REF##*/} - if: env.AWS_USR != '' && env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' + - name: Enable Earthly to Push Artifacts + if: env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' && env.AWS_USR != '' + run: echo "EARTHLY_PUSH=true" > $GITHUB_ENV + + - name: Set CROSSPLANE_VERSION GitHub Environment Variable + run: earthly +ci-version + + - name: Build and Push Artifacts + run: earthly --strict +ci-artifacts --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} + + - name: Push Artifacts to https://releases.crossplane.io/build/ + if: env.AWS_USR != '' + run: earthly --strict +ci-push-build-artifacts --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} env: + AWS_DEFAULT_REGION: us-east-1 AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }} - AWS_DEFAULT_REGION: us-east-1 - GIT_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} - DOCS_GIT_USR: ${{ secrets.UPBOUND_BOT_GITHUB_USR }} - DOCS_GIT_PSW: ${{ secrets.UPBOUND_BOT_GITHUB_PSW }} - - name: Promote Artifacts in S3, DockerHub - if: github.ref == 'refs/heads/master' && env.AWS_USR != '' && env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' - run: make -j2 promote + - name: Push Artifacts to https://releases.crossplane.io/master/ and https://charts.crossplane.io/master + if: env.AWS_USR != '' && github.ref == 'refs/heads/master' + run: earthly --strict +ci-promote-build-artifacts --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} --CHANNEL=master env: - BRANCH_NAME: master - CHANNEL: master + AWS_DEFAULT_REGION: us-east-1 AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }} - AWS_DEFAULT_REGION: us-east-1 + + - name: Upload Artifacts to GitHub + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 + with: + name: output + path: _output/** fuzz-test: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: # TODO(negz): Can we make this use our Go build and dependency cache? 
It @@ -472,8 +308,6 @@ jobs: protobuf-schemas: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout @@ -483,7 +317,7 @@ jobs: uses: bufbuild/buf-setup-action@v1 with: github_token: ${{ secrets.GITHUB_TOKEN }} - + - name: Lint Protocol Buffers uses: bufbuild/buf-lint-action@v1 with: @@ -498,7 +332,7 @@ jobs: with: input: apis against: "https://github.com/${GITHUB_REPOSITORY}.git#branch=master,subdir=apis" - + - name: Push Protocol Buffers to Buf Schema Registry if: ${{ github.repository == 'crossplane/crossplane' && github.ref == 'refs/heads/master' }} uses: bufbuild/buf-push-action@v1 diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index ee28e24fb..4b2ed27c7 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -12,7 +12,6 @@ on: default: 'stable' # Note: For pre-releases, we want to promote the pre-release version to # the (stable) channel, but not set it as the "current" version. - # See: https://github.com/upbound/build/pull/243 pre-release: type: boolean description: 'This is a pre-release' @@ -21,7 +20,7 @@ on: env: # Common versions - GO_VERSION: '1.22.3' + EARTHLY_VERSION: '0.8.11' # Common users. We can't run a step 'if secrets.AWS_USR != ""' but we can run # a step 'if env.AWS_USR' != ""', so we copy these to succinctly test whether @@ -37,39 +36,33 @@ jobs: steps: - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - with: - submodules: true - - name: Setup Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - go-version: ${{ env.GO_VERSION }} - - - name: Fetch History - run: git fetch --prune --unshallow + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Login to DockerHub - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + - name: Promote Image to docker.io/crossplane/crossplane:${{ inputs.channel }} if: env.DOCKER_USR != '' - with: - username: ${{ secrets.DOCKER_USR }} - password: ${{ secrets.DOCKER_PSW }} + run: | + earthly --strict \ + --secret DOCKER_USER=${{ secrets.DOCKER_USR }} \ + --secret DOCKER_PASSWORD=${{ secrets.DOCKER_PSW }} \ + +ci-promote-image --CHANNEL=${{ inputs.channel }} --CROSSPLANE_VERSION=${{ inputs.version }} --CROSSPLANE_REPO=docker.io/crossplane/crossplane - - name: Login to Upbound - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + - name: Promote Image to xpkg.upbound.io/crossplane/crossplane:${{ inputs.channel }} if: env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' - with: - registry: xpkg.upbound.io - username: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} - password: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_PSW }} + run: | + earthly --strict \ + --secret DOCKER_USER=${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} \ + --secret DOCKER_PASSWORD=${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_PSW }} \ + +ci-promote-image --CHANNEL=${{ inputs.channel }} --CROSSPLANE_VERSION=${{ inputs.version }} --CROSSPLANE_REPO=xpkg.upbound.io/crossplane/crossplane - - name: Promote Artifacts in S3, DockerHub, and Upbound Registry - if: env.AWS_USR != '' && env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' - run: make -j2 promote BRANCH_NAME=${GITHUB_REF##*/} + - name: Promote Build Artifacts to https://releases.crossplane.io/${{ inputs.channel }} + if: env.AWS_USR != '' + run: earthly --strict +ci-promote-build-artifacts 
--CHANNEL=${{ inputs.channel }} --PRERELEASE=${{ inputs.pre-release }} --CROSSPLANE_VERSION=${{ inputs.version }} env: - VERSION: ${{ github.event.inputs.version }} - CHANNEL: ${{ github.event.inputs.channel }} - PRE_RELEASE: ${{ github.event.inputs.pre-release }} + AWS_DEFAULT_REGION: us-east-1 AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }} - AWS_DEFAULT_REGION: us-east-1 diff --git a/.gitignore b/.gitignore index 8ef5e3bad..31d0f1988 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ /.cache /.work +/.hack /_output /config/ /config diff --git a/.gitmodules b/.gitmodules index 8f84209c8..e69de29bb 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "build"] - path = build - url = https://github.com/crossplane/build diff --git a/Earthfile b/Earthfile new file mode 100644 index 000000000..6ad5f3d04 --- /dev/null +++ b/Earthfile @@ -0,0 +1,399 @@ +# See https://docs.earthly.dev/docs/earthfile/features +VERSION --try --raw-output --run-with-aws 0.8 + +PROJECT crossplane/crossplane + +ARG --global GO_VERSION=1.22.3 + +# reviewable checks that a branch is ready for review. Run it before opening a +# pull request. It will catch a lot of the things our CI workflow will catch. +reviewable: + WAIT + BUILD +generate + END + BUILD +lint + BUILD +test + +# test runs unit tests. +test: + BUILD +go-test + +# lint runs linters. +lint: + BUILD +go-lint + BUILD +helm-lint + +# build builds Crossplane for your native OS and architecture. +build: + BUILD +image + BUILD +helm-build + +# multiplatform-build builds Crossplane for all supported OS and architectures. +multiplatform-build: + BUILD +go-multiplatform-build + BUILD +multiplatform-image + BUILD +helm-build + +# generate runs code generation. To keep builds fast, it doesn't run as part of +# the build target. It's important to run it explicitly when code needs to be +# generated, for example when you update an API type. +generate: + BUILD +go-modules-tidy + BUILD +go-generate + BUILD +helm-generate + +# e2e runs end-to-end tests. See test/e2e/README.md for details. +e2e: + ARG --required FLAGS + # Docker installs faster on Alpine, and we only need Go for go tool test2json. + FROM golang:${GO_VERSION}-alpine3.20 + RUN apk add --no-cache docker jq + COPY +helm-setup/helm /usr/local/bin/helm + COPY +kind-setup/kind /usr/local/bin/kind + COPY +gotestsum-setup/gotestsum /usr/local/bin/gotestsum + COPY +go-build-e2e/e2e . + COPY --dir cluster test . + WITH DOCKER --load crossplane-e2e/crossplane:latest=+image + TRY + # TODO(negz:) Set GITHUB_ACTIONS=true and use RUN --raw-output when + # https://github.com/earthly/earthly/issues/4143 is fixed. + RUN gotestsum --no-color=false --format testname --junitfile e2e-tests.xml --raw-command go tool test2json -t -p E2E ./e2e -test.v ${FLAGS} + FINALLY + SAVE ARTIFACT --if-exists e2e-tests.xml AS LOCAL _output/tests/e2e-tests.xml + END + END + +# hack builds Crossplane, and deploys it to a kind cluster. It runs in your +# local environment, not a container. The kind cluster will keep running until +# you run the unhack target. Run hack again to rebuild Crossplane and restart +# the kind cluster with the new build. +hack: + # TODO(negz): This could run an interactive shell inside a temporary container + # once https://github.com/earthly/earthly/issues/3206 is fixed. 
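+    # A rough sketch of the workflow this target assumes: `earthly +hack`
+    # builds the image and chart, creates a kind cluster named crossplane-hack,
+    # and installs the chart into it. Run `earthly +unhack` to delete the
+    # cluster when you're done.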
+ LOCALLY + WAIT + BUILD +unhack + END + COPY +helm-setup/helm .hack/helm + COPY +kind-setup/kind .hack/kind + COPY (+helm-build/output --CROSSPLANE_VERSION=v0.0.0-hack) .hack/charts + WITH DOCKER --load crossplane-hack/crossplane:hack=+image + RUN \ + .hack/kind create cluster --name crossplane-hack && \ + .hack/kind load docker-image --name crossplane-hack crossplane-hack/crossplane:hack && \ + .hack/helm install --create-namespace --namespace crossplane-system crossplane .hack/charts/crossplane-0.0.0-hack.tgz \ + --set "image.pullPolicy=Never,image.repository=crossplane-hack/crossplane,image.tag=hack" \ + --set "args={--debug}" + END + RUN docker image rm crossplane-hack/crossplane:hack + RUN rm -rf .hack + +# unhack deletes the kind cluster created by the hack target. +unhack: + LOCALLY + COPY +kind-setup/kind .hack/kind + RUN .hack/kind delete cluster --name crossplane-hack + RUN rm -rf .hack + +# go-modules downloads Crossplane's go modules. It's the base target of most Go +# related target (go-build, etc). +go-modules: + ARG NATIVEPLATFORM + FROM --platform=${NATIVEPLATFORM} golang:${GO_VERSION} + WORKDIR /crossplane + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY go.mod go.sum ./ + RUN go mod download + SAVE ARTIFACT go.mod AS LOCAL go.mod + SAVE ARTIFACT go.sum AS LOCAL go.sum + +# go-modules-tidy tidies and verifies go.mod and go.sum. +go-modules-tidy: + FROM +go-modules + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY --dir apis/ cmd/ internal/ pkg/ test/ . + RUN go mod tidy + RUN go mod verify + SAVE ARTIFACT go.mod AS LOCAL go.mod + SAVE ARTIFACT go.sum AS LOCAL go.sum + +# go-generate runs Go code generation. +go-generate: + FROM +go-modules + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY --dir hack/ apis/ internal/ . + RUN go generate -tags 'generate' ./apis/... + SAVE ARTIFACT apis/ AS LOCAL apis + +# go-build builds Crossplane binaries for your native OS and architecture. +go-build: + ARG EARTHLY_GIT_SHORT_HASH + ARG EARTHLY_GIT_COMMIT_TIMESTAMP + ARG CROSSPLANE_VERSION=v0.0.0-${EARTHLY_GIT_COMMIT_TIMESTAMP}-${EARTHLY_GIT_SHORT_HASH} + ARG NATIVEPLATFORM + ARG TARGETARCH + ARG TARGETOS + ARG GOARCH=${TARGETARCH} + ARG GOOS=${TARGETOS} + ARG GOFLAGS="-ldflags=-X=github.com/crossplane/crossplane/internal/version.version=${CROSSPLANE_VERSION}" + ARG CGO_ENABLED=0 + FROM +go-modules + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY --dir apis/ cmd/ internal/ pkg/ . + RUN go build -o crossplane ./cmd/crossplane + RUN go build -o crank ./cmd/crank + SAVE ARTIFACT crossplane AS LOCAL _output/bin/${GOOS}_${GOARCH}/crossplane + SAVE ARTIFACT crank AS LOCAL _output/bin/${GOOS}_${GOARCH}/crank + +# go-multiplatform-build builds Crossplane binaries for all supported OS +# and architectures. +go-multiplatform-build: + BUILD \ + --platform=linux/amd64 \ + --platform=linux/arm64 \ + --platform=linux/arm \ + --platform=linux/ppc64le \ + --platform=darwin/arm64 \ + --platform=darwin/amd64 \ + --platform=windows/amd64 \ + +go-build + +# go-build-e2e builds Crossplane's end-to-end tests. +go-build-e2e: + ARG CGO_ENABLED=0 + FROM +go-modules + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY --dir apis/ internal/ test/ . + RUN go test -c -o e2e ./test/e2e + SAVE ARTIFACT e2e + +# go-test runs Go unit tests. +go-test: + FROM +go-modules + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY --dir apis/ cmd/ internal/ pkg/ . + RUN go test -covermode=count -coverprofile=coverage.txt ./... 
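+    # The profile is also saved below as _output/tests/coverage.txt, so coverage
+    # can be inspected locally, e.g. `go tool cover -func=_output/tests/coverage.txt`.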
+ SAVE ARTIFACT coverage.txt AS LOCAL _output/tests/coverage.txt + +# go-lint lints Go code. +go-lint: + ARG GOLANGCI_LINT_VERSION=v1.58.2 + FROM +go-modules + # This cache is private because golangci-lint doesn't support concurrent runs. + CACHE --id go-lint --sharing private /root/.cache/golangci-lint + CACHE --id go-build --sharing shared /root/.cache/go-build + RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin ${GOLANGCI_LINT_VERSION} + COPY .golangci.yml . + COPY --dir apis/ cmd/ internal/ pkg/ test/ . + RUN golangci-lint run --fix + SAVE ARTIFACT apis AS LOCAL apis + SAVE ARTIFACT cmd AS LOCAL cmd + SAVE ARTIFACT internal AS LOCAL internal + SAVE ARTIFACT pkg AS LOCAL pkg + SAVE ARTIFACT test AS LOCAL test + +# image builds the Crossplane OCI image for your native architecture. +image: + ARG EARTHLY_GIT_BRANCH + ARG EARTHLY_GIT_SHORT_HASH + ARG EARTHLY_GIT_COMMIT_TIMESTAMP + ARG CROSSPLANE_REPO=build-${EARTHLY_GIT_SHORT_HASH}/crossplane + ARG CROSSPLANE_VERSION=v0.0.0-${EARTHLY_GIT_COMMIT_TIMESTAMP}-${EARTHLY_GIT_SHORT_HASH} + ARG NATIVEPLATFORM + ARG TARGETPLATFORM + ARG TARGETARCH + ARG TARGETOS + FROM --platform=${TARGETPLATFORM} gcr.io/distroless/static@sha256:41972110a1c1a5c0b6adb283e8aa092c43c31f7c5d79b8656fbffff2c3e61f05 + COPY --platform=${NATIVEPLATFORM} (+go-build/crossplane --GOOS=${TARGETOS} --GOARCH=${TARGETARCH}) /usr/local/bin/ + COPY --dir cluster/crds/ /crds + COPY --dir cluster/webhookconfigurations/ /webhookconfigurations + EXPOSE 8080 + USER 65532 + ENTRYPOINT ["crossplane"] + SAVE IMAGE --push ${CROSSPLANE_REPO}:${CROSSPLANE_VERSION} + SAVE IMAGE --push ${CROSSPLANE_REPO}:${EARTHLY_GIT_BRANCH} + +# multiplatform-image builds the Crossplane OCI image for all supported +# architectures. +multiplatform-image: + BUILD \ + --platform=linux/amd64 \ + --platform=linux/arm64 \ + --platform=linux/arm \ + --platform=linux/ppc64le \ + +image + +# helm-lint lints the Crossplane Helm chart. +helm-lint: + FROM alpine:3.20 + WORKDIR /chart + COPY +helm-setup/helm /usr/local/bin/helm + COPY cluster/charts/crossplane/ . + RUN --entrypoint helm lint + +# helm-generate runs Helm code generation - specifically helm-docs. +helm-generate: + FROM alpine:3.20 + WORKDIR /chart + COPY +helm-docs-setup/helm-docs /usr/local/bin/helm-docs + COPY cluster/charts/crossplane/ . + RUN helm-docs + SAVE ARTIFACT . AS LOCAL cluster/charts/crossplane + +# helm-build packages the Crossplane Helm chart. +helm-build: + ARG EARTHLY_GIT_SHORT_HASH + ARG EARTHLY_GIT_COMMIT_TIMESTAMP + ARG CROSSPLANE_VERSION=v0.0.0-${EARTHLY_GIT_COMMIT_TIMESTAMP}-${EARTHLY_GIT_SHORT_HASH} + FROM alpine:3.20 + WORKDIR /chart + COPY +helm-setup/helm /usr/local/bin/helm + COPY cluster/charts/crossplane/ . + # We strip the leading v from Helm chart versions. + LET CROSSPLANE_CHART_VERSION=$(echo ${CROSSPLANE_VERSION}|sed -e 's/^v//') + RUN helm dependency update + RUN helm package --version ${CROSSPLANE_CHART_VERSION} --app-version ${CROSSPLANE_CHART_VERSION} -d output . + SAVE ARTIFACT output AS LOCAL _output/charts + +# kind-setup is used by other targets to setup kind. +kind-setup: + ARG KIND_VERSION=v0.21.0 + ARG NATIVEOS + ARG NATIVEARCH + FROM curlimages/curl:8.8.0 + RUN curl -fsSLo kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-${NATIVEOS}-${NATIVEARCH}&&chmod +x kind + SAVE ARTIFACT kind + +# gotestsum-setup is used by other targets to setup gotestsum. 
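+# gotestsum wraps `go test` output; the +e2e target uses it to produce readable
+# per-test progress and a JUnit XML report (e2e-tests.xml) for CI.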
+gotestsum-setup: + ARG GOTESTSUM_VERSION=1.11.0 + ARG NATIVEOS + ARG NATIVEARCH + FROM curlimages/curl:8.8.0 + RUN curl -fsSL https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${NATIVEOS}_${NATIVEARCH}.tar.gz|tar zx>gotestsum + SAVE ARTIFACT gotestsum + +# helm-docs-setup is used by other targets to setup helm-docs. +helm-docs-setup: + ARG HELM_DOCS_VERSION=1.11.0 + ARG NATIVEOS + ARG NATIVEARCH + FROM curlimages/curl:8.8.0 + IF [ "${NATIVEARCH}" = "amd64" ] + LET ARCH=x86_64 + ELSE + LET ARCH=${NATIVEARCH} + END + RUN curl -fsSL https://github.com/norwoodj/helm-docs/releases/download/v${HELM_DOCS_VERSION}/helm-docs_${HELM_DOCS_VERSION}_${NATIVEOS}_${ARCH}.tar.gz|tar zx>helm-docs + SAVE ARTIFACT helm-docs + +# helm-setup is used by other targets to setup helm. +helm-setup: + ARG HELM_VERSION=v3.14.4 + ARG NATIVEOS + ARG NATIVEARCH + FROM curlimages/curl:8.8.0 + RUN curl -fsSL https://get.helm.sh/helm-${HELM_VERSION}-${NATIVEOS}-${NATIVEARCH}.tar.gz|tar zx --strip-components=1 + SAVE ARTIFACT helm + +# Targets below this point are intended only for use in GitHub Actions CI. They +# may not work outside of that environment. For example they may depend on +# secrets that are only availble in the CI environment. Targets below this point +# must be prefixed with ci-. + +# TODO(negz): Is there a better way to determine the Crossplane version? +# This versioning approach maintains compatibility with the build submodule. See +# https://github.com/crossplane/build/blob/231258/makelib/common.mk#L205. This +# approach is problematic in Earthly because computing it inside a containerized +# target requires copying the entire git repository into the container. Doing so +# would invalidate all dependent target caches any time any file in git changed. + +# ci-version is used by CI to set the CROSSPLANE_VERSION environment variable. +ci-version: + LOCALLY + RUN echo "CROSSPLANE_VERSION=$(git describe --dirty --always --tags|sed -e 's/-/./2g')" > $GITHUB_ENV + +# ci-artifacts is used by CI to build and push the Crossplane image, chart, and +# binaries. +ci-artifacts: + BUILD +multiplatform-build \ + --CROSSPLANE_REPO=index.docker.io/crossplane/crossplane \ + --CROSSPLANE_REPO=xpkg.upbound.io/crossplane/crossplane + +# ci-codeql-setup sets up CodeQL for the ci-codeql target. +ci-codeql-setup: + ARG CODEQL_VERSION=v2.17.3 + FROM curlimages/curl:8.8.0 + RUN curl -fsSL https://github.com/github/codeql-action/releases/download/codeql-bundle-${CODEQL_VERSION}/codeql-bundle-linux64.tar.gz|tar zx + SAVE ARTIFACT codeql + +# ci-codeql is used by CI to build Crossplane with CodeQL scanning enabled. +ci-codeql: + ARG CGO_ENABLED=0 + ARG NATIVEOS + ARG NATIVEARCH + FROM +go-modules + IF [ "${NATIVEARCH}" = "arm64" ] && [ "${NATIVEOS}" = "linux" ] + RUN --no-cache echo "CodeQL doesn't support Linux on Apple Silicon" && false + END + COPY --dir +ci-codeql-setup/codeql /codeql + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY --dir apis/ cmd/ internal/ pkg/ . + RUN /codeql/codeql database create /codeqldb --language=go + RUN /codeql/codeql database analyze /codeqldb --threads=0 --format=sarif-latest --output=go.sarif --sarif-add-baseline-file-info + SAVE ARTIFACT go.sarif AS LOCAL _output/codeql/go.sarif + +# ci-promote-image is used by CI to promote a Crossplane image to a channel. +# In practice, this means creating a new channel tag (e.g. master or stable) +# that points to the supplied version. 
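+# For example, promoting an (illustrative) CROSSPLANE_VERSION of v1.16.0 to
+# CHANNEL=stable retags ${CROSSPLANE_REPO}:v1.16.0 as both :stable and
+# :v1.16.0-stable.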
+ci-promote-image: + ARG --required CROSSPLANE_REPO + ARG --required CROSSPLANE_VERSION + ARG --required CHANNEL + FROM alpine:3.20 + RUN apk add docker + RUN --secret DOCKER_USER --secret DOCKER_PASSWORD docker login -u ${DOCKER_USER} -p ${DOCKER_PASSWORD} + RUN --push docker buildx imagetools create \ + --tag ${CROSSPLANE_REPO}:${CHANNEL} \ + --tag ${CROSSPLANE_REPO}:${CROSSPLANE_VERSION}-${CHANNEL} \ + ${CROSSPLANE_REPO}:${CROSSPLANE_VERSION} + +# TODO(negz): Ideally ci-push-build-artifacts would be merged into ci-artifacts, +# i.e. just build and push them all in the same target. Currently we're relying +# on the fact that ci-artifacts does a bunch of SAVE ARTIFACT AS LOCAL, which +# ci-push-build-artifacts then loads. That's an anti-pattern in Earthly. We're +# supposed to use COPY instead, but I'm not sure how to COPY artifacts from a +# matrix build. + +# ci-push-build-artifacts is used by CI to push binary artifacts to S3. +ci-push-build-artifacts: + ARG --required CROSSPLANE_VERSION + ARG ARTIFACTS_DIR=_output + ARG EARTHLY_GIT_BRANCH + ARG BUCKET_RELEASES=crossplane.releases + FROM amazon/aws-cli:2.15.57 + COPY --dir ${ARTIFACTS_DIR} artifacts + RUN --push --aws aws s3 sync --delete artifacts s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} + +# ci-promote-build-artifacts is used by CI to promote binary artifacts and Helm +# charts to a channel. In practice, this means copying them from one S3 +# directory to another. +ci-promote-build-artifacts: + ARG --required CROSSPLANE_VERSION + ARG --required CHANNEL + ARG HELM_REPO_URL=https://charts.crossplane.io + ARG EARTHLY_GIT_BRANCH + ARG BUCKET_RELEASES=crossplane.releases + ARG BUCKET_CHARTS=crossplane.charts + ARG PRERELEASE=false + FROM amazon/aws-cli:2.15.57 + COPY +helm-setup/helm /usr/local/bin/helm + RUN --aws aws s3 sync s3://${BUCKET_CHARTS}/${CHANNEL} repo + RUN --aws aws s3 sync s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION}/charts repo + RUN helm repo index --url ${HELM_REPO_URL} repo + RUN --push --aws aws s3 sync --delete repo s3://${BUCKET_CHARTS}/${CHANNEL} + RUN --push --aws aws s3 cp "private, max-age=0, no-transform" repo/index.yaml s3://${BUCKET_CHARTS}/${CHANNEL}/index.yaml + RUN --push --aws aws s3 sync --delete s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/${CROSSPLANE_VERSION} + IF [ "${PRERELEASE}" = "false" ] + RUN --push --aws aws s3 sync --delete s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/current + END diff --git a/Makefile b/Makefile deleted file mode 100644 index 08c322816..000000000 --- a/Makefile +++ /dev/null @@ -1,202 +0,0 @@ -# ==================================================================================== -# Setup Project - -PROJECT_NAME := crossplane -PROJECT_REPO := github.com/crossplane/$(PROJECT_NAME) - -PLATFORMS ?= linux_amd64 linux_arm64 linux_arm linux_ppc64le darwin_amd64 darwin_arm64 windows_amd64 -# -include will silently skip missing files, which allows us -# to load those files with a target in the Makefile. If only -# "include" was used, the make command would fail and refuse -# to run a target until the include commands succeeded. 
--include build/makelib/common.mk - -# ==================================================================================== -# Setup Output - -S3_BUCKET ?= crossplane.releases --include build/makelib/output.mk - -# ==================================================================================== -# Setup Go - -# Set a sane default so that the nprocs calculation below is less noisy on the initial -# loading of this file -NPROCS ?= 1 - -# each of our test suites starts a kube-apiserver and running many test suites in -# parallel can lead to high CPU utilization. by default we reduce the parallelism -# to half the number of CPU cores. -GO_TEST_PARALLEL := $(shell echo $$(( $(NPROCS) / 2 ))) - -GO_STATIC_PACKAGES = $(GO_PROJECT)/cmd/crossplane $(GO_PROJECT)/cmd/crank -GO_TEST_PACKAGES = $(GO_PROJECT)/test/e2e -GO_LDFLAGS += -X $(GO_PROJECT)/internal/version.version=$(VERSION) -GO_SUBDIRS += cmd internal apis pkg -GO111MODULE = on -GOLANGCILINT_VERSION = 1.58.2 -GO_LINT_ARGS ?= "--fix" - --include build/makelib/golang.mk - -# ==================================================================================== -# Setup Kubernetes tools - -HELM_VERSION = v3.14.4 -KIND_VERSION = v0.21.0 --include build/makelib/k8s_tools.mk - -# ==================================================================================== -# Setup Helm - -HELM_BASE_URL = https://charts.crossplane.io -HELM_S3_BUCKET = crossplane.charts -HELM_CHARTS = crossplane -HELM_CHART_LINT_ARGS_crossplane = --set nameOverride='',imagePullSecrets='' -HELM_DOCS_ENABLED = true -HELM_VALUES_TEMPLATE_SKIPPED = true --include build/makelib/helm.mk - -# ==================================================================================== -# Setup Images -# Due to the way that the shared build logic works, images should -# all be in folders at the same level (no additional levels of nesting). - -REGISTRY_ORGS ?= docker.io/crossplane xpkg.upbound.io/crossplane -IMAGES = crossplane --include build/makelib/imagelight.mk - -# ==================================================================================== -# Targets - -# run `make help` to see the targets and options - -# We want submodules to be set up the first time `make` is run. -# We manage the build/ folder and its Makefiles as a submodule. -# The first time `make` is run, the includes of build/*.mk files will -# all fail, and this target will be run. The next time, the default as defined -# by the includes will be run instead. -fallthrough: submodules - @echo Initial setup complete. Running make again . . . - @make - -CRD_DIR = cluster/crds -CRD_PATCH_DIR = cluster/crd-patches - -# See patch files for details. 
-crds.patch: $(KUBECTL) - @$(INFO) patching generated CRDs - @mkdir -p $(WORK_DIR)/patch - @$(KUBECTL) patch --local --type=json -f $(CRD_DIR)/pkg.crossplane.io_deploymentruntimeconfigs.yaml --patch-file $(CRD_PATCH_DIR)/pkg.crossplane.io_deploymentruntimeconfigs.yaml -o yaml > $(WORK_DIR)/patch/pkg.crossplane.io_deploymentruntimeconfigs.yaml - @mv $(WORK_DIR)/patch/pkg.crossplane.io_deploymentruntimeconfigs.yaml $(CRD_DIR)/pkg.crossplane.io_deploymentruntimeconfigs.yaml - @$(OK) patched generated CRDs - -crds.clean: - @$(INFO) cleaning generated CRDs - @find $(CRD_DIR) -name '*.yaml' -exec sed -i.sed -e '1,1d' {} \; || $(FAIL) - @find $(CRD_DIR) -name '*.yaml.sed' -delete || $(FAIL) - @$(OK) cleaned generated CRDs - -generate.run: gen-kustomize-crds gen-chart-license - -gen-chart-license: - @cp -f LICENSE cluster/charts/crossplane/LICENSE - -generate.done: crds.clean crds.patch - -gen-kustomize-crds: - @$(INFO) Adding all CRDs to Kustomize file for local development - @rm cluster/kustomization.yaml - @echo "# This kustomization can be used to remotely install all Crossplane CRDs" >> cluster/kustomization.yaml - @echo "# by running kubectl apply -k https://github.com/crossplane/crossplane//cluster?ref=master" >> cluster/kustomization.yaml - @echo "resources:" >> cluster/kustomization.yaml - @find $(CRD_DIR) -type f -name '*.yaml' | sort | \ - while read filename ;\ - do echo "- $${filename#*/}" >> cluster/kustomization.yaml \ - ; done - @$(OK) All CRDs added to Kustomize file for local development - -e2e-tag-images: - @$(INFO) Tagging E2E test images - @docker tag $(BUILD_REGISTRY)/$(PROJECT_NAME)-$(TARGETARCH) crossplane-e2e/$(PROJECT_NAME):latest || $(FAIL) - @$(OK) Tagged E2E test images - -# NOTE(negz): There's already a go.test.integration target, but it's weird. -# This relies on make build building the e2e binary. -E2E_TEST_FLAGS ?= - -# TODO(negz): Ideally we'd just tell the E2E tests which CLI tools to invoke. -# https://github.com/kubernetes-sigs/e2e-framework/issues/282 -E2E_PATH = $(WORK_DIR)/e2e - -GOTESTSUM_VERSION ?= v1.11.0 -GOTESTSUM := $(TOOLS_HOST_DIR)/gotestsum - -$(GOTESTSUM): - @$(INFO) installing gotestsum - @GOBIN=$(TOOLS_HOST_DIR) $(GOHOST) install gotest.tools/gotestsum@$(GOTESTSUM_VERSION) || $(FAIL) - @$(OK) installed gotestsum - -e2e-run-tests: - @$(INFO) Run E2E tests - @mkdir -p $(E2E_PATH) - @ln -sf $(KIND) $(E2E_PATH)/kind - @ln -sf $(HELM) $(E2E_PATH)/helm - @PATH="$(E2E_PATH):${PATH}" $(GOTESTSUM) --format testname --junitfile $(GO_TEST_OUTPUT)/e2e-tests.xml --raw-command -- $(GO) tool test2json -t -p e2e $(GO_TEST_OUTPUT)/e2e -test.v $(E2E_TEST_FLAGS) || $(FAIL) - @$(OK) Run E2E tests - -e2e.init: build e2e-tag-images - -e2e.run: $(GOTESTSUM) $(KIND) $(HELM3) e2e-run-tests - -# Update the submodules, such as the common build scripts. -submodules: - @git submodule sync - @git submodule update --init --recursive - -# Install CRDs into a cluster. This is for convenience. -install-crds: $(KUBECTL) reviewable - $(KUBECTL) apply -f $(CRD_DIR) - -# Uninstall CRDs from a cluster. This is for convenience. -uninstall-crds: - $(KUBECTL) delete -f $(CRD_DIR) - -# NOTE(hasheddan): the build submodule currently overrides XDG_CACHE_HOME in -# order to force the Helm 3 to use the .work/helm directory. This causes Go on -# Linux machines to use that directory as the build cache as well. 
We should -# adjust this behavior in the build submodule because it is also causing Linux -# users to duplicate their build cache, but for now we just make it easier to -# identify its location in CI so that we cache between builds. -go.cachedir: - @go env GOCACHE - -# This is for running out-of-cluster locally, and is for convenience. Running -# this make target will print out the command which was used. For more control, -# try running the binary directly with different arguments. -run: go.build - @$(INFO) Running Crossplane locally out-of-cluster . . . - @# To see other arguments that can be provided, run the command with --help instead - $(GO_OUT_DIR)/$(PROJECT_NAME) core start --debug - -.PHONY: manifests submodules fallthrough test-integration run install-crds uninstall-crds gen-kustomize-crds e2e-tests-compile e2e.test.images - -# ==================================================================================== -# Special Targets - -define CROSSPLANE_MAKE_HELP -Crossplane Targets: - submodules Update the submodules, such as the common build scripts. - run Run crossplane locally, out-of-cluster. Useful for development. - -endef -# The reason CROSSPLANE_MAKE_HELP is used instead of CROSSPLANE_HELP is because the crossplane -# binary will try to use CROSSPLANE_HELP if it is set, and this is for something different. -export CROSSPLANE_MAKE_HELP - -crossplane.help: - @echo "$$CROSSPLANE_MAKE_HELP" - -help-special: crossplane.help - -.PHONY: crossplane.help help-special diff --git a/build b/build deleted file mode 160000 index 231258db2..000000000 --- a/build +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 231258db281237379d8ec0c6e4af9d7c1ae5cc4a diff --git a/cluster/images/crossplane/Dockerfile b/cluster/images/crossplane/Dockerfile deleted file mode 100644 index 1d4ad3eae..000000000 --- a/cluster/images/crossplane/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM gcr.io/distroless/static@sha256:41972110a1c1a5c0b6adb283e8aa092c43c31f7c5d79b8656fbffff2c3e61f05 - -ARG TARGETOS -ARG TARGETARCH - -COPY bin/$TARGETOS\_$TARGETARCH/crossplane /usr/local/bin/ -COPY crds /crds -COPY webhookconfigurations /webhookconfigurations -EXPOSE 8080 -USER 65532 -ENTRYPOINT ["crossplane"] diff --git a/cluster/images/crossplane/Makefile b/cluster/images/crossplane/Makefile deleted file mode 100755 index 72f26d2d6..000000000 --- a/cluster/images/crossplane/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# ==================================================================================== -# Setup Project - -include ../../../build/makelib/common.mk - -# ==================================================================================== -# Options - -include ../../../build/makelib/imagelight.mk - -# ==================================================================================== -# Targets - -img.build: - @$(INFO) docker build $(IMAGE) - @$(MAKE) BUILD_ARGS="--load" img.build.shared - @$(OK) docker build $(IMAGE) - -img.publish: - @$(INFO) docker publish $(IMAGE) - @$(MAKE) BUILD_ARGS="--push" img.build.shared - @$(OK) docker publish $(IMAGE) - -img.build.shared: - @cp Dockerfile $(IMAGE_TEMP_DIR) || $(FAIL) - @cp -r $(OUTPUT_DIR)/bin/ $(IMAGE_TEMP_DIR)/bin || $(FAIL) - @cp -a ../../../cluster/crds $(IMAGE_TEMP_DIR) || $(FAIL) - @cp -a ../../../cluster/webhookconfigurations $(IMAGE_TEMP_DIR) || $(FAIL) - @docker buildx build $(BUILD_ARGS) \ - --platform $(IMAGE_PLATFORMS) \ - -t $(IMAGE) \ - $(IMAGE_TEMP_DIR) || $(FAIL) - -img.promote: - @$(INFO) docker promote $(FROM_IMAGE) to $(TO_IMAGE) - @docker buildx 
imagetools create -t $(TO_IMAGE) $(FROM_IMAGE) - @$(OK) docker promote $(FROM_IMAGE) to $(TO_IMAGE) diff --git a/cluster/local/README.md b/cluster/local/README.md deleted file mode 100644 index 53e098138..000000000 --- a/cluster/local/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Deploying Crossplane Locally - -This directory contains scripts that automate common local development flows for -Crossplane, allowing you to deploy your local build of Crossplane to a `kind` -cluster. Run [kind.sh](./kind.sh) to setup a single-node kind Kubernetes -cluster. diff --git a/cluster/local/kind.sh b/cluster/local/kind.sh deleted file mode 100755 index 37721f26d..000000000 --- a/cluster/local/kind.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env bash - -set -e - -scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -# shellcheck disable=SC1090 -projectdir="${scriptdir}/../.." - -# get the build environment variables from the special build.vars target in the main makefile -eval $(make --no-print-directory -C ${scriptdir}/../.. build.vars) - -# ensure the tools we need are installed -make ${KIND} ${KUBECTL} ${HELM3} - -BUILD_IMAGE="${BUILD_REGISTRY}/${PROJECT_NAME}-${TARGETARCH}" -DEFAULT_NAMESPACE="crossplane-system" - -function copy_image_to_cluster() { - local build_image=$1 - local final_image=$2 - local kind_name=$3 - docker tag "${build_image}" "${final_image}" - ${KIND} --name "${kind_name}" load docker-image "${final_image}" - echo "Tagged image: ${final_image}" -} - -# Deletes pods with application prefix. Namespace is expected as the first argument -function delete_pods() { - for pod in $(kubectl get pods -n "$2" -l "app=$1" --no-headers -o custom-columns=NAME:.metadata.name); do - kubectl delete pod "$pod" -n "$2" - done -} - -# current kubectl context == kind-kind, returns boolean -function check_context() { - if [ "$(kubectl config view 2>/dev/null | awk '/current-context/ {print $NF}')" = "kind-kind" ]; then - return 0 - fi - - return 1 -} - -# configure kind -KIND_NAME=${KIND_NAME:-"kind"} -IMAGE_REPOSITORY="xpkg.upbound.io/${PROJECT_NAME}/${PROJECT_NAME}" -case "${1:-}" in - up) - ${KIND} create cluster --name "${KIND_NAME}" --image "${KUBE_IMAGE}" --wait 5m - ;; - update) - helm_tag="$(cat _output/version)" - copy_image_to_cluster ${BUILD_IMAGE} "${IMAGE_REPOSITORY}:${helm_tag}" "${KIND_NAME}" - ;; - restart) - if check_context; then - [ "$2" ] && ns=$2 || ns="${DEFAULT_NAMESPACE}" - echo "Restarting \"${PROJECT_NAME}\" deployment pods in \"$ns\" namespace." - delete_pods ${PROJECT_NAME} ${ns} - else - echo "To prevent accidental data loss acting only on 'kind-kind' context. No action is taken." 
- fi - ;; - helm-install) - echo "copying image for helm" - helm_tag="$(cat _output/version)" - copy_image_to_cluster ${BUILD_IMAGE} "${IMAGE_REPOSITORY}:${helm_tag}" "${KIND_NAME}" - - [ "$2" ] && ns=$2 || ns="${DEFAULT_NAMESPACE}" - echo "installing helm package into \"$ns\" namespace" - ${HELM3} install ${PROJECT_NAME} --namespace ${ns} --create-namespace ${projectdir}/cluster/charts/${PROJECT_NAME} --set image.pullPolicy=Never,imagePullSecrets='',image.tag="${helm_tag}" --set args='{"--debug"}' ${HELM3_FLAGS} - ;; - helm-upgrade) - echo "copying image for helm" - helm_tag="$(cat _output/version)" - copy_image_to_cluster ${BUILD_IMAGE} "${IMAGE_REPOSITORY}:${helm_tag}" "${KIND_NAME}" - - [ "$2" ] && ns=$2 || ns="${DEFAULT_NAMESPACE}" - echo "upgrading helm package in \"$ns\" namespace" - ${HELM3} upgrade --install --namespace ${ns} --create-namespace ${PROJECT_NAME} ${projectdir}/cluster/charts/${PROJECT_NAME} --set image.pullPolicy=Never,imagePullSecrets='',image.tag=${helm_tag} --set args='{"--debug"}' ${HELM3_FLAGS} - ;; - helm-delete) - [ "$2" ] && ns=$2 || ns="${DEFAULT_NAMESPACE}" - echo "removing helm package from \"$ns\" namespace" - ${HELM3} uninstall --namespace ${ns} ${PROJECT_NAME} - ;; - helm-list) - ${HELM3} list --all --all-namespaces - ;; - clean) - ${KIND} --name "${KIND_NAME}" delete cluster - ;; - *) - echo "usage:" >&2 - echo " $0 up - create a new kind cluster" >&2 - echo " $0 clean - delete the kind cluster" >&2 - echo " $0 update - push project docker images to kind cluster registry" >&2 - echo " $0 restart project deployment pod(s) in specified namespace [default: \"${DEFAULT_NAMESPACE}\"]" >&2 - echo " $0 helm-install package(s) into provided namespace [default: \"${DEFAULT_NAMESPACE}\"]" >&2 - echo " $0 helm-upgrade - deploy the latest docker images and helm charts to kind cluster" >&2 - echo " $0 helm-delete package(s)" >&2 - echo " $0 helm-list all package(s)" >&2 -esac diff --git a/contributing/README.md b/contributing/README.md index 8b396696b..efd8bf246 100644 --- a/contributing/README.md +++ b/contributing/README.md @@ -10,13 +10,38 @@ us on [Slack]. Please also take a look at our [code of conduct], which details how contributors are expected to conduct themselves as part of the Crossplane community. +## Establishing a Development Environment + +> The Crossplane project consists of several repositories under the crossplane +> and crossplane-contrib GitHub organisations. We're experimenting with +> [Earthly] in this repository (crossplane) and crossplane-runtime. Most other +> repositories use a `Makefile`. To establish a development environment for a +> repository with a `Makefile`, try running `make && make help`. + +Crossplane is written in [Go]. You don't need to have Go installed to contribute +code to Crossplane but it helps to use an editor that understands Go. + +To setup a Crossplane development environment: + +1. Fork and clone this repository. +1. Install [Docker][get-docker] and [Earthly][get-earthly]. + +Use the `earthly` command to build and test Crossplane. Run `earthly doc` to see +available build targets. + +Useful targets include: + +* `earthly +reviewable` - Run code generators, linters, and unit tests. +* `earthly +e2e` - Run end-to-end tests. +* `earthly +hack` - Build Crossplane and deploy it to a local `kind` cluster. + ## Checklist Cheat Sheet Wondering whether something on the pull request checklist applies to your PR? Generally: * Everyone must read and follow this contribution process. 
-* Every PR must run (and pass) `make reviewable`. +* Every PR must run (and pass) `earthly +reviewable`. * Most PRs that touch code should touch unit tests. We want ~80% coverage. * Any significant feature should be covered by E2E tests. If you're adding a new feature, you should probably be adding or updating E2Es. @@ -63,7 +88,7 @@ Ensure each of your commits is signed-off in compliance with the [Developer Certificate of Origin] by using `git commit -s`. The Crossplane project highly values readable, idiomatic Go code. Familiarise yourself with the [Coding Style](#coding-style) section below and try to preempt any comments your -reviewers would otherwise leave. Run `make reviewable` to lint your change. +reviewers would otherwise leave. Run `earthly +reviewable` to lint your change. All Crossplane features must be covered by unit **and** end-to-end (E2E) tests. @@ -820,62 +845,13 @@ func TestExample(t *testing.T) { } ``` -## Establishing a Development Environment - -The Crossplane project consists of several repositories under the crossplane and -crossplane-contrib GitHub organisations. Most of these projects use the Upbound -[build submodule]; a library of common Makefiles. Establishing a development -environment typically requires: - -1. Forking and cloning the repository you wish to work on. -1. Installing development dependencies. -1. Running `make` to establish the build submodule. - -Run `make help` for information on the available Make targets. Useful targets -include: - -* `make reviewable` - Run code generation, linters, and unit tests. -* `make e2e` - Run end-to-end tests. -* `make` - Build Crossplane. - -Once you've built Crossplane you can deploy it to a Kubernetes cluster of your -choice. [`kind`] (Kubernetes in Docker) is a good choice for development. The -`kind.sh` script contains several utilities to deploy and run a development -build of Crossplane to `kind`: - -```bash -# Build Crossplane locally. -make - -# See what commands are available. -./cluster/local/kind.sh help - -# Start a new kind cluster. Specifying KUBE_IMAGE is optional. -KUBE_IMAGE=kindest/node:v1.27.1 ./cluster/local/kind.sh up - -# Use Helm to deploy the local build of Crossplane. -./cluster/local/kind.sh helm-install - -# Use Helm to upgrade the local build of Crossplane. -./cluster/local/kind.sh helm-upgrade -``` - -When iterating rapidly on a change it can be faster to run Crossplane as a local -process, rather than as a pod deployed by Helm to your Kubernetes cluster. Use -Helm to install your local Crossplane build per the above instructions, then: - -```bash -# Stop the Helm-deployed Crossplane pod. -kubectl -n crossplane-system scale deploy crossplane --replicas=0 - -# Run Crossplane locally; it should connect to your kind cluster if said cluster -# is your active kubectl context. You can also go run cmd/crossplane/main.go. 
-make run -``` - [Slack]: https://slack.crossplane.io/ [code of conduct]: https://github.com/cncf/foundation/blob/master/code-of-conduct.md -[build submodule]: https://github.com/upbound/build/ +[Earthly]: https://docs.earthly.dev +[get-docker]: https://docs.docker.com/get-docker +[get-earthly]: https://earthly.dev/get-earthly +[Go]: https://go.dev +[build submodule]: https://github.com/crossplane/build/ [`kind`]: https://kind.sigs.k8s.io/ [Crossplane release cycle]: https://docs.crossplane.io/knowledge-base/guides/release-cycle [good git commit hygiene]: https://www.futurelearn.com/info/blog/telling-stories-with-your-git-history diff --git a/design/one-pager-build-with-earthly.md b/design/one-pager-build-with-earthly.md index a2bc4ecc4..396045add 100644 --- a/design/one-pager-build-with-earthly.md +++ b/design/one-pager-build-with-earthly.md @@ -57,7 +57,7 @@ I proposed we switch from Make to https://earthly.dev. Earthly targets the 'glue' layer between language-specific tools like `go` and CI systems like GitHub Actions. In Crossplane, Earthly would replace Make and -Docker. It's based on Docker's [BuildKit](buildkit), so all builds are +Docker. It's based on Docker's [BuildKit][buildkit], so all builds are containerized and hermetic. ### Configuration @@ -132,8 +132,8 @@ Here are some CI comparisons run on GitHub Actions standard workers. | Publish artifacts | ~12 minutes | ~14 minutes | | Run E2E tests | ~12 minutes | ~14 minutes | -Earthly uses [ caching to run containerized builds as fast as Make's -"native" builds. For Crossplane this primarily means two things: +Earthly uses caching to run containerized builds as fast as Make's "native" +builds. For Crossplane this primarily means two things: * It caches Go modules, and will only redownload them if `go.mod` changes. * It stores the Go build cache in a cache volume that's reused across builds. @@ -220,7 +220,7 @@ example: * https://docs.dagger.io/quickstart/428201/custom-function I could see this becoming useful if our build logic became _really_ complex, but -in most case I prefer the simpler `Earthfile` syntax. +for our current use cases I prefer the simpler `Earthfile` syntax. ### Bazel and friends @@ -230,7 +230,7 @@ multiple languages, where building the entire monorepo for every change isn't feasible. Bazel uses `BUILD` files with rules written in Starlark, a Pythonic language. -Bazel doesn't wrap tools like `go`, but completely replaces them. It's not +Bazel doesn't wrap tools like `go`, it completely replaces them. It's not compatible with Go modules for example, and instead offers tools like `gazelle` to generate a `BUILD` file from a module-based third party dependency. From ea9e4393fd9081334b006fc275ec3de1738132a1 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 10:45:44 -0700 Subject: [PATCH 248/370] Remove CRD Kustomization We could reimplement this with Earthly but I suspect it's not used. 
Signed-off-by: Nic Cope --- cluster/kustomization.yaml | 18 ------------------ 1 file changed, 18 deletions(-) delete mode 100644 cluster/kustomization.yaml diff --git a/cluster/kustomization.yaml b/cluster/kustomization.yaml deleted file mode 100644 index 3cfd869a4..000000000 --- a/cluster/kustomization.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# This kustomization can be used to remotely install all Crossplane CRDs -# by running kubectl apply -k https://github.com/crossplane/crossplane//cluster?ref=master -resources: -- crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml -- crds/apiextensions.crossplane.io_compositionrevisions.yaml -- crds/apiextensions.crossplane.io_compositions.yaml -- crds/apiextensions.crossplane.io_environmentconfigs.yaml -- crds/apiextensions.crossplane.io_usages.yaml -- crds/pkg.crossplane.io_configurationrevisions.yaml -- crds/pkg.crossplane.io_configurations.yaml -- crds/pkg.crossplane.io_controllerconfigs.yaml -- crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml -- crds/pkg.crossplane.io_functionrevisions.yaml -- crds/pkg.crossplane.io_functions.yaml -- crds/pkg.crossplane.io_locks.yaml -- crds/pkg.crossplane.io_providerrevisions.yaml -- crds/pkg.crossplane.io_providers.yaml -- crds/secrets.crossplane.io_storeconfigs.yaml From 2bea574749b0872447398ea46409e001152cff23 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 10:54:57 -0700 Subject: [PATCH 249/370] Save generated CRDs AS LOCAL I'm not currently reimplementing the crds.clean target that trims the leading --- from the CRDs because it's unclear whether it's still needed. Signed-off-by: Nic Cope --- Earthfile | 1 + ...xtensions.crossplane.io_compositeresourcedefinitions.yaml | 1 + .../apiextensions.crossplane.io_compositionrevisions.yaml | 1 + cluster/crds/apiextensions.crossplane.io_compositions.yaml | 1 + .../crds/apiextensions.crossplane.io_environmentconfigs.yaml | 1 + cluster/crds/apiextensions.crossplane.io_usages.yaml | 1 + cluster/crds/pkg.crossplane.io_configurationrevisions.yaml | 1 + cluster/crds/pkg.crossplane.io_configurations.yaml | 1 + cluster/crds/pkg.crossplane.io_controllerconfigs.yaml | 1 + cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml | 5 +---- cluster/crds/pkg.crossplane.io_functionrevisions.yaml | 1 + cluster/crds/pkg.crossplane.io_functions.yaml | 1 + cluster/crds/pkg.crossplane.io_locks.yaml | 1 + cluster/crds/pkg.crossplane.io_providerrevisions.yaml | 1 + cluster/crds/pkg.crossplane.io_providers.yaml | 1 + cluster/crds/secrets.crossplane.io_storeconfigs.yaml | 1 + 16 files changed, 16 insertions(+), 4 deletions(-) diff --git a/Earthfile b/Earthfile index 6ad5f3d04..58355e19e 100644 --- a/Earthfile +++ b/Earthfile @@ -124,6 +124,7 @@ go-generate: COPY --dir hack/ apis/ internal/ . RUN go generate -tags 'generate' ./apis/... SAVE ARTIFACT apis/ AS LOCAL apis + SAVE ARTIFACT cluster/crds AS LOCAL cluster/crds # go-build builds Crossplane binaries for your native OS and architecture. 
go-build: diff --git a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml index 6b6af162d..0baefa7b8 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index 3c4b7db94..cb60d3324 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index 61e6d4c66..f38e0660c 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml index 1d80d90b2..03db70ad1 100644 --- a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml +++ b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/apiextensions.crossplane.io_usages.yaml b/cluster/crds/apiextensions.crossplane.io_usages.yaml index cc8f7e557..34b658999 100644 --- a/cluster/crds/apiextensions.crossplane.io_usages.yaml +++ b/cluster/crds/apiextensions.crossplane.io_usages.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml index bd101a2e8..d1b5ef789 100644 --- a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_configurations.yaml b/cluster/crds/pkg.crossplane.io_configurations.yaml index 42022bdba..68281a426 100644 --- a/cluster/crds/pkg.crossplane.io_configurations.yaml +++ b/cluster/crds/pkg.crossplane.io_configurations.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml index cf23a2caf..50dbff689 100644 --- a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml index 35a599ab7..a4e79ec44 100644 --- a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -1,3 +1,4 @@ +--- apiVersion: 
apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -4211,8 +4212,6 @@ spec: ip: description: IP address of the host file entry. type: string - required: - - ip type: object type: array x-kubernetes-list-map-keys: @@ -4267,8 +4266,6 @@ spec: More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? type: string - required: - - name type: object x-kubernetes-map-type: atomic type: array diff --git a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml index d7ba776e2..ef541371a 100644 --- a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_functions.yaml b/cluster/crds/pkg.crossplane.io_functions.yaml index 7e5342cc6..e0a43d03a 100644 --- a/cluster/crds/pkg.crossplane.io_functions.yaml +++ b/cluster/crds/pkg.crossplane.io_functions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_locks.yaml b/cluster/crds/pkg.crossplane.io_locks.yaml index 4daff56a3..b55f731bf 100644 --- a/cluster/crds/pkg.crossplane.io_locks.yaml +++ b/cluster/crds/pkg.crossplane.io_locks.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml index 439140a4c..ecafb8762 100644 --- a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_providers.yaml b/cluster/crds/pkg.crossplane.io_providers.yaml index 717433f4f..677fbf9c5 100644 --- a/cluster/crds/pkg.crossplane.io_providers.yaml +++ b/cluster/crds/pkg.crossplane.io_providers.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml index 989d5fcb4..85ffc16a6 100644 --- a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml +++ b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: From c758343965da4f48cf05981b5aa6f9afb5c4f5b5 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 11:39:29 -0700 Subject: [PATCH 250/370] Patch the DeploymentRuntimeConfig CRD This is hacky and I hate it, but I can't think of a better approach. Signed-off-by: Nic Cope --- Earthfile | 18 ++++++++++++++++++ ...crossplane.io_deploymentruntimeconfigs.yaml | 5 ++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/Earthfile b/Earthfile index 58355e19e..91b91e1ec 100644 --- a/Earthfile +++ b/Earthfile @@ -121,8 +121,17 @@ go-modules-tidy: go-generate: FROM +go-modules CACHE --id go-build --sharing shared /root/.cache/go-build + COPY +kubectl-setup/kubectl /usr/local/bin/kubectl + COPY --dir cluster/crd-patches cluster/crd-patches COPY --dir hack/ apis/ internal/ . RUN go generate -tags 'generate' ./apis/... + # TODO(negz): Can this move into generate.go? 
Ideally it would live there with + # the code that actually generates the CRDs, but it depends on kubectl. + RUN kubectl patch --local --type=json \ + --patch-file cluster/crd-patches/pkg.crossplane.io_deploymentruntimeconfigs.yaml \ + --filename cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml \ + --output=yaml > /tmp/patched.yaml \ + && mv /tmp/patched.yaml cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml SAVE ARTIFACT apis/ AS LOCAL apis SAVE ARTIFACT cluster/crds AS LOCAL cluster/crds @@ -256,6 +265,15 @@ helm-build: RUN helm package --version ${CROSSPLANE_CHART_VERSION} --app-version ${CROSSPLANE_CHART_VERSION} -d output . SAVE ARTIFACT output AS LOCAL _output/charts +# kubectl-setup is used by other targets to setup kubectl. +kubectl-setup: + ARG KUBECTL_VERSION=v1.30.1 + ARG NATIVEOS + ARG NATIVEARCH + FROM curlimages/curl:8.8.0 + RUN curl -fsSL https://dl.k8s.io/${KUBECTL_VERSION}/kubernetes-client-${NATIVEOS}-${NATIVEARCH}.tar.gz|tar zx + SAVE ARTIFACT kubernetes/client/bin/kubectl + # kind-setup is used by other targets to setup kind. kind-setup: ARG KIND_VERSION=v0.21.0 diff --git a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml index a4e79ec44..35a599ab7 100644 --- a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -4212,6 +4211,8 @@ spec: ip: description: IP address of the host file entry. type: string + required: + - ip type: object type: array x-kubernetes-list-map-keys: @@ -4266,6 +4267,8 @@ spec: More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? type: string + required: + - name type: object x-kubernetes-map-type: atomic type: array From 5023de366383b9cfde1c064e4b454c1772603276 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 11:43:00 -0700 Subject: [PATCH 251/370] Don't mention bumping golangci-lint in the workflow Its version isn't specified there anymore. Signed-off-by: Nic Cope --- .github/renovate.json5 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index f32072250..e4ac4ca6a 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -45,7 +45,7 @@ }, { "customType": "regex", - "description": "Bump golangci-lint version in workflows and the Earthfile", + "description": "Bump golangci-lint version in the Earthfile", "fileMatch": [ "^Earthfile$" ], From d875a1fd18653223e705111d09fd295817b65448 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 12:05:09 -0700 Subject: [PATCH 252/370] Make the +e2e target work without FLAGS Signed-off-by: Nic Cope --- Earthfile | 2 +- contributing/README.md | 2 +- test/e2e/README.md | 67 ++++++++++++------------------------------ 3 files changed, 20 insertions(+), 51 deletions(-) diff --git a/Earthfile b/Earthfile index 91b91e1ec..260563360 100644 --- a/Earthfile +++ b/Earthfile @@ -44,7 +44,7 @@ generate: # e2e runs end-to-end tests. See test/e2e/README.md for details. e2e: - ARG --required FLAGS + ARG FLAGS="-test-suite=base" # Docker installs faster on Alpine, and we only need Go for go tool test2json. 
FROM golang:${GO_VERSION}-alpine3.20 RUN apk add --no-cache docker jq diff --git a/contributing/README.md b/contributing/README.md index efd8bf246..ed5fd72ab 100644 --- a/contributing/README.md +++ b/contributing/README.md @@ -32,7 +32,7 @@ available build targets. Useful targets include: * `earthly +reviewable` - Run code generators, linters, and unit tests. -* `earthly +e2e` - Run end-to-end tests. +* `earthly -P +e2e` - Run end-to-end tests. * `earthly +hack` - Build Crossplane and deploy it to a local `kind` cluster. ## Checklist Cheat Sheet diff --git a/test/e2e/README.md b/test/e2e/README.md index 334d418ab..08d27c758 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -17,72 +17,41 @@ All Crossplane features must be exercised by these tests, as well as unit tests. ## Running Tests -Run `make e2e` to run E2E tests. +Run `earthly -P +e2e` to run E2E tests. -This compiles Crossplane and an E2E test binary. It then runs the test binary. -Use the `E2E_TEST_FLAGS` to pass flags to the test binary. For example: +This compiles Crossplane and an E2E test binary. It then uses the test binary to +run the base test suite. Use the `FLAGS` to pass flags to the test binary. For +example: ```shell -# Most tests use t.Log to explain what they're doing. Use the -test.v flag -# (equivalent to go test -v) to see detailed test progress and logs. -E2E_TEST_FLAGS="-test.v" make e2e - # Some functions that setup the test environment (e.g. kind) use the klog logger # The -v flag controls the verbosity of klog. Use -v=4 for debug logging. -E2E_TEST_FLAGS="-test.v -v=4" make e2e +earthly -P +e2e --FLAGS="-v=4" # To run only a specific test, match it by regular expression -E2E_TEST_FLAGS="-test.run ^TestConfiguration" make e2e +earthly -P +e2e --FLAGS="-test.run ^TestConfiguration" # To test features with certain labels, use the labels flag -E2E_TEST_FLAGS="-labels area=apiextensions" make e2e +earthly -P +e2e --FLAGS="-labels area=apiextensions" # To test a specific feature, use the feature flag -E2E_TEST_FLAGS="-feature=ConfigurationWithDependency" make e2e +earthly -P +e2e --FLAGS="-feature=ConfigurationWithDependency" # Stop immediately on first test failure, and leave the kind cluster to debug. -E2E_TEST_FLAGS="-test.v -test.failfast -destroy-kind-cluster=false" - -# Use an existing Kubernetes cluster. Note that the E2E tests can't deploy your -# local build of Crossplane in this scenario, so you'll have to do it yourself. -E2E_TEST_FLAGS="-create-kind-cluster=false -destroy-kind-cluster=false -kubeconfig=$HOME/.kube/config" make e2e - -# Run the CrossplaneUpgrade feature, against an existing kind cluster named -# "kind" (or creating it if it doesn't exist), # without installing Crossplane -# first, as the feature expects the cluster to be empty, but still loading the -# images to it. Setting the tests to fail fast and not destroying the cluster -# afterward in order to allow debugging it. -E2E_TEST_FLAGS="-test.v -v 4 -test.failfast \ - -destroy-kind-cluster=false \ - -kind-cluster-name=kind \ - -preinstall-crossplane=false \ - -feature=CrossplaneUpgrade" make e2e - -# Run all the tests not installing or upgrading Crossplane against the currently -# selected cluster where Crossplane has already been installed. 
-E2E_TEST_FLAGS="-test.v -v 4 -test.failfast \ - -kubeconfig=$HOME/.kube/config \ - -skip-labels modify-crossplane-installation=true \ - -create-kind-cluster=false \ - -preinstall-crossplane=false" make go.build e2e-run-tests - -# Run the composition-webhook-schema-validation suite of tests, which will -# result in all tests marked as "test-suite=base" or -# "test-suite=composition-webhook-schema-validation" being run against a kind -# cluster with Crossplane installed with composition-webhook-schema-validation -# enabled -E2E_TEST_FLAGS="-test.v -v 4 -test.failfast \ - -test-suite=composition-webhook-schema-validation " make e2e +earthly -i -P +e2e --FLAGS="-test.failfast -destroy-kind-cluster=false" + +# Run a specific test suite. +earthly -P +e2e --FLAGS="-test.v -test-suite=composition-webhook-schema-validation" ``` ## Test Parallelism -`make e2e` runs all defined E2E tests serially. Tests do not run in parallel. -This is because all tests run against the same API server and Crossplane has a -lot of cluster-scoped state - XRDs, Providers, Compositions, etc. It's easier -and less error-prone to write tests when you don't have to worry about one test -potentially conflicting with another - for example by installing the same -provider another test would install. +`earthly -P +e2e` runs all defined E2E tests serially. Tests do not run in +parallel. This is because all tests run against the same API server and +Crossplane has a lot of cluster-scoped state - XRDs, Providers, Compositions, +etc. It's easier and less error-prone to write tests when you don't have to +worry about one test potentially conflicting with another - for example by +installing the same provider another test would install. The [CI GitHub workflow] uses a matrix strategy to run multiple jobs in parallel, each running a test suite, see the dedicated section for more details. From ba9e43fe16dc3dd5476dabe852b4be49aedf88a2 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 13:38:54 -0700 Subject: [PATCH 253/370] Use the correct platform for the hack targets TARGETPLATFORM is incorrect on MacOS, where TARGETOS will be Linux. There we want USERPLATFORM, i.e. Darwin. Signed-off-by: Nic Cope --- Earthfile | 64 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/Earthfile b/Earthfile index 260563360..6b5f3398a 100644 --- a/Earthfile +++ b/Earthfile @@ -70,12 +70,13 @@ e2e: hack: # TODO(negz): This could run an interactive shell inside a temporary container # once https://github.com/earthly/earthly/issues/3206 is fixed. + ARG USERPLATFORM LOCALLY WAIT BUILD +unhack END - COPY +helm-setup/helm .hack/helm - COPY +kind-setup/kind .hack/kind + COPY --platform=${USERPLATFORM} +helm-setup/helm .hack/helm + COPY --platform=${USERPLATFORM} +kind-setup/kind .hack/kind COPY (+helm-build/output --CROSSPLANE_VERSION=v0.0.0-hack) .hack/charts WITH DOCKER --load crossplane-hack/crossplane:hack=+image RUN \ @@ -90,8 +91,9 @@ hack: # unhack deletes the kind cluster created by the hack target. 
unhack: + ARG USERPLATFORM LOCALLY - COPY +kind-setup/kind .hack/kind + COPY --platform=${USERPLATFORM} +kind-setup/kind .hack/kind RUN .hack/kind delete cluster --name crossplane-hack RUN rm -rf .hack @@ -140,7 +142,6 @@ go-build: ARG EARTHLY_GIT_SHORT_HASH ARG EARTHLY_GIT_COMMIT_TIMESTAMP ARG CROSSPLANE_VERSION=v0.0.0-${EARTHLY_GIT_COMMIT_TIMESTAMP}-${EARTHLY_GIT_SHORT_HASH} - ARG NATIVEPLATFORM ARG TARGETARCH ARG TARGETOS ARG GOARCH=${TARGETARCH} @@ -268,51 +269,56 @@ helm-build: # kubectl-setup is used by other targets to setup kubectl. kubectl-setup: ARG KUBECTL_VERSION=v1.30.1 - ARG NATIVEOS - ARG NATIVEARCH - FROM curlimages/curl:8.8.0 - RUN curl -fsSL https://dl.k8s.io/${KUBECTL_VERSION}/kubernetes-client-${NATIVEOS}-${NATIVEARCH}.tar.gz|tar zx + ARG NATIVEPLATFORM + ARG TARGETOS + ARG TARGETARCH + FROM --platform=${NATIVEPLATFORM} curlimages/curl:8.8.0 + RUN curl -fsSL https://dl.k8s.io/${KUBECTL_VERSION}/kubernetes-client-${TARGETOS}-${TARGETARCH}.tar.gz|tar zx SAVE ARTIFACT kubernetes/client/bin/kubectl # kind-setup is used by other targets to setup kind. kind-setup: ARG KIND_VERSION=v0.21.0 - ARG NATIVEOS - ARG NATIVEARCH - FROM curlimages/curl:8.8.0 - RUN curl -fsSLo kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-${NATIVEOS}-${NATIVEARCH}&&chmod +x kind + ARG NATIVEPLATFORM + ARG TARGETOS + ARG TARGETARCH + FROM --platform=${NATIVEPLATFORM} curlimages/curl:8.8.0 + RUN curl -fsSLo kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-${TARGETOS}-${TARGETARCH}&&chmod +x kind SAVE ARTIFACT kind # gotestsum-setup is used by other targets to setup gotestsum. gotestsum-setup: ARG GOTESTSUM_VERSION=1.11.0 - ARG NATIVEOS - ARG NATIVEARCH - FROM curlimages/curl:8.8.0 - RUN curl -fsSL https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${NATIVEOS}_${NATIVEARCH}.tar.gz|tar zx>gotestsum + ARG NATIVEPLATFORM + ARG TARGETOS + ARG TARGETARCH + FROM --platform=${NATIVEPLATFORM} curlimages/curl:8.8.0 + RUN curl -fsSL https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${TARGETOS}_${TARGETARCH}.tar.gz|tar zx>gotestsum SAVE ARTIFACT gotestsum # helm-docs-setup is used by other targets to setup helm-docs. helm-docs-setup: ARG HELM_DOCS_VERSION=1.11.0 - ARG NATIVEOS - ARG NATIVEARCH - FROM curlimages/curl:8.8.0 - IF [ "${NATIVEARCH}" = "amd64" ] + ARG NATIVEPLATFORM + ARG TARGETOS + ARG TARGETARCH + FROM --platform=${NATIVEPLATFORM} curlimages/curl:8.8.0 + IF [ "${TARGETARCH}" = "amd64" ] LET ARCH=x86_64 ELSE - LET ARCH=${NATIVEARCH} + LET ARCH=${TARGETARCH} END - RUN curl -fsSL https://github.com/norwoodj/helm-docs/releases/download/v${HELM_DOCS_VERSION}/helm-docs_${HELM_DOCS_VERSION}_${NATIVEOS}_${ARCH}.tar.gz|tar zx>helm-docs + RUN curl -fsSL https://github.com/norwoodj/helm-docs/releases/download/v${HELM_DOCS_VERSION}/helm-docs_${HELM_DOCS_VERSION}_${TARGETOS}_${ARCH}.tar.gz|tar zx>helm-docs SAVE ARTIFACT helm-docs # helm-setup is used by other targets to setup helm. 
helm-setup: ARG HELM_VERSION=v3.14.4 - ARG NATIVEOS - ARG NATIVEARCH - FROM curlimages/curl:8.8.0 - RUN curl -fsSL https://get.helm.sh/helm-${HELM_VERSION}-${NATIVEOS}-${NATIVEARCH}.tar.gz|tar zx --strip-components=1 + ARG NATIVEPLATFORM + ARG TARGETOS + ARG TARGETARCH + FROM --platform=${NATIVEPLATFORM} curlimages/curl:8.8.0 + RUN curl -fsSL https://get.helm.sh/helm-${HELM_VERSION}-${TARGETOS}-${TARGETARCH}.tar.gz|tar zx --strip-components=1 SAVE ARTIFACT helm # Targets below this point are intended only for use in GitHub Actions CI. They @@ -349,10 +355,10 @@ ci-codeql-setup: # ci-codeql is used by CI to build Crossplane with CodeQL scanning enabled. ci-codeql: ARG CGO_ENABLED=0 - ARG NATIVEOS - ARG NATIVEARCH + ARG TARGETOS + ARG TARGETARCH FROM +go-modules - IF [ "${NATIVEARCH}" = "arm64" ] && [ "${NATIVEOS}" = "linux" ] + IF [ "${TARGETARCH}" = "arm64" ] && [ "${TARGETOS}" = "linux" ] RUN --no-cache echo "CodeQL doesn't support Linux on Apple Silicon" && false END COPY --dir +ci-codeql-setup/codeql /codeql From ce01d4c8f0ffcbb3fc77de4cd620776a99b11dd6 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 14:56:52 -0700 Subject: [PATCH 254/370] Have Renovate bump Kubectl version, too Signed-off-by: Nic Cope --- .github/renovate.json5 | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index e4ac4ca6a..7e97c6402 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -92,6 +92,18 @@ "datasourceTemplate": "github-tags", "depNameTemplate": "kubernetes-sigs/kind", }, + { + "customType": "regex", + "description": "Bump kubectl version in the Earthfile", + "fileMatch": [ + "^Earthfile$" + ], + "matchStrings": [ + "ARG KUBECTL_VERSION=v(?.*?)\\n" + ], + "datasourceTemplate": "github-tags", + "depNameTemplate": "kubernetes/kubernetes", + }, { "customType": "regex", "description": "Bump gotestsum version in the Earthfile", From 762c793e09649c5c0dab3a76922f9d830403e76d Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 17:31:30 -0700 Subject: [PATCH 255/370] Pass AWS credits instead of using --aws I thought the experimental --aws flag did this, but it seems like it might not. There's not much documentation on it, so for now just pass AWS credentials explicitly. 
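As a rough sketch of the approach (the exact targets and flags are in the
diff below), the workflow now passes the credentials in as Earthly secrets
and each RUN command mounts them explicitly:

    # CI invocation - the values come from GitHub Actions secrets:
    earthly --strict \
      --secret=AWS_ACCESS_KEY_ID=... \
      --secret=AWS_SECRET_ACCESS_KEY=... \
      +ci-push-build-artifacts

    # Earthfile side - the mounted secrets become env vars the aws CLI reads:
    # RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync ...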
Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 20 ++++++++++---------- .github/workflows/promote.yml | 10 +++++----- Earthfile | 18 ++++++++++-------- 3 files changed, 25 insertions(+), 23 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 47778f848..58abf3dc8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -259,19 +259,19 @@ jobs: - name: Push Artifacts to https://releases.crossplane.io/build/ if: env.AWS_USR != '' - run: earthly --strict +ci-push-build-artifacts --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} - env: - AWS_DEFAULT_REGION: us-east-1 - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }} + run: | + earthly --strict \ + --secret=AWS_ACCESS_KEY_ID=${{ secrets.AWS_USR }} \ + --secret=AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_PSW }} \ + +ci-push-build-artifacts --AWS_DEFAULT_REGION=us-east-1 --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} - name: Push Artifacts to https://releases.crossplane.io/master/ and https://charts.crossplane.io/master if: env.AWS_USR != '' && github.ref == 'refs/heads/master' - run: earthly --strict +ci-promote-build-artifacts --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} --CHANNEL=master - env: - AWS_DEFAULT_REGION: us-east-1 - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }} + run: | + earthly --strict \ + --secret=AWS_ACCESS_KEY_ID=${{ secrets.AWS_USR }} \ + --secret=AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_PSW }} \ + +ci-promote-build-artifacts --AWS_DEFAULT_REGION=us-east-1 --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} --CHANNEL=master - name: Upload Artifacts to GitHub uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index 4b2ed27c7..d1fa5881e 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -61,8 +61,8 @@ jobs: - name: Promote Build Artifacts to https://releases.crossplane.io/${{ inputs.channel }} if: env.AWS_USR != '' - run: earthly --strict +ci-promote-build-artifacts --CHANNEL=${{ inputs.channel }} --PRERELEASE=${{ inputs.pre-release }} --CROSSPLANE_VERSION=${{ inputs.version }} - env: - AWS_DEFAULT_REGION: us-east-1 - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }} + run: | + earthly --strict \ + --secret=AWS_ACCESS_KEY_ID=${{ secrets.AWS_USR }} \ + --secret=AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_PSW }} \ + +ci-promote-build-artifacts --AWS_DEFAULT_REGION=us-east-1 --CHANNEL=${{ inputs.channel }} --PRERELEASE=${{ inputs.pre-release }} --CROSSPLANE_VERSION=${{ inputs.version }} diff --git a/Earthfile b/Earthfile index 6b5f3398a..bd6313f5a 100644 --- a/Earthfile +++ b/Earthfile @@ -1,5 +1,5 @@ # See https://docs.earthly.dev/docs/earthfile/features -VERSION --try --raw-output --run-with-aws 0.8 +VERSION --try --raw-output 0.8 PROJECT crossplane/crossplane @@ -396,9 +396,10 @@ ci-push-build-artifacts: ARG ARTIFACTS_DIR=_output ARG EARTHLY_GIT_BRANCH ARG BUCKET_RELEASES=crossplane.releases + ARG AWS_DEFAULT_REGION FROM amazon/aws-cli:2.15.57 COPY --dir ${ARTIFACTS_DIR} artifacts - RUN --push --aws aws s3 sync --delete artifacts s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete artifacts s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} # ci-promote-build-artifacts is used by CI to promote binary artifacts and Helm # 
charts to a channel. In practice, this means copying them from one S3 @@ -411,14 +412,15 @@ ci-promote-build-artifacts: ARG BUCKET_RELEASES=crossplane.releases ARG BUCKET_CHARTS=crossplane.charts ARG PRERELEASE=false + ARG AWS_DEFAULT_REGION FROM amazon/aws-cli:2.15.57 COPY +helm-setup/helm /usr/local/bin/helm - RUN --aws aws s3 sync s3://${BUCKET_CHARTS}/${CHANNEL} repo - RUN --aws aws s3 sync s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION}/charts repo + RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync s3://${BUCKET_CHARTS}/${CHANNEL} repo + RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION}/charts repo RUN helm repo index --url ${HELM_REPO_URL} repo - RUN --push --aws aws s3 sync --delete repo s3://${BUCKET_CHARTS}/${CHANNEL} - RUN --push --aws aws s3 cp "private, max-age=0, no-transform" repo/index.yaml s3://${BUCKET_CHARTS}/${CHANNEL}/index.yaml - RUN --push --aws aws s3 sync --delete s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/${CROSSPLANE_VERSION} + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete repo s3://${BUCKET_CHARTS}/${CHANNEL} + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 cp "private, max-age=0, no-transform" repo/index.yaml s3://${BUCKET_CHARTS}/${CHANNEL}/index.yaml + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/${CROSSPLANE_VERSION} IF [ "${PRERELEASE}" = "false" ] - RUN --push --aws aws s3 sync --delete s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/current + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/current END From 4974aa8d5107e1bcefdcd8f5504073fb9a5c8877 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 17:56:41 -0700 Subject: [PATCH 256/370] Bump Earthly version with Renovate Signed-off-by: Nic Cope --- .github/renovate.json5 | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 7e97c6402..42873b9a3 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -31,6 +31,18 @@ "automated" ], "customManagers": [ + { + "customType": "regex", + "description": "Bump Earthly version in GitHub workflows", + "fileMatch": [ + "^\\.github\\/workflows\\/[^/]+\\.ya?ml$" + ], + "matchStrings": [ + "EARTHLY_VERSION '(?.*?)'\\n" + ], + "datasourceTemplate": "github-tags", + "depNameTemplate": "earthly/earthly" + }, { "customType": "regex", "description": "Bump Go version in Earthfile", From 7bb719b1bd1bb1d0b541a86b44eefdd1bdaa9c81 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 18:15:05 -0700 Subject: [PATCH 257/370] Enable remote caching We'll have to see whether this helps at all. I expect it to slow down master builds, since we'll spend time pushing everything to cache. For PR builds, I hope it'll improve some by being able to avoid compiling / linting / testing when nothing changed. 
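Roughly sketched (the real steps are in the workflow diff below), every job
points Earthly at a shared inline-cache image in GHCR, keyed by job name:

    earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:lint +lint

PR builds only read from that cache. Master builds additionally set
EARTHLY_PUSH and EARTHLY_MAX_REMOTE_CACHE, so they also pay the cost of
pushing the cache layers back.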
Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 92 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 85 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 58abf3dc8..40ede6eb6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -44,8 +44,21 @@ jobs: username: ${{ secrets.DOCKER_USR }} password: ${{ secrets.DOCKER_PSW }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Earthly to Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/master' + run: | + echo "EARTHLY_PUSH=true" > $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + - name: Generate Files - run: earthly --strict +generate + run: earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }} +generate - name: Count Changed Files id: changed_files @@ -77,8 +90,21 @@ jobs: username: ${{ secrets.DOCKER_USR }} password: ${{ secrets.DOCKER_PSW }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Earthly to Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/master' + run: | + echo "EARTHLY_PUSH=true" > $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + - name: Lint - run: earthly --strict +lint + run: earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }} +lint codeql: runs-on: ubuntu-22.04 @@ -100,8 +126,21 @@ jobs: username: ${{ secrets.DOCKER_USR }} password: ${{ secrets.DOCKER_PSW }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Earthly to Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/master' + run: | + echo "EARTHLY_PUSH=true" > $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + - name: Run CodeQL - run: earthly --strict +ci-codeql + run: earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }} +ci-codeql - name: Upload CodeQL Results to GitHub uses: github/codeql-action/upload-sarif@v3 @@ -151,8 +190,21 @@ jobs: username: ${{ secrets.DOCKER_USR }} password: ${{ secrets.DOCKER_PSW }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Earthly to Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/master' + run: | + echo "EARTHLY_PUSH=true" > $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + - name: Run Unit Tests - run: earthly --strict +test + run: earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }} +test - name: Publish Unit Test Coverage uses: codecov/codecov-action@125fc84a9a348dbcf27191600683ec096ec9021c # v4 @@ -190,8 +242,23 @@ jobs: username: ${{ secrets.DOCKER_USR }} password: ${{ secrets.DOCKER_PSW }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Earthly to Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/master' + run: | + echo 
"EARTHLY_PUSH=true" > $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + - name: Run E2E Tests - run: earthly --strict --allow-privileged +e2e --FLAGS="-test.failfast -fail-fast --test-suite ${{ matrix.test-suite }}" + run: | + earthly --strict --allow-privileged --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }}-${{ matrix.test-suite}} \ + +e2e --FLAGS="-test.failfast -fail-fast --test-suite ${{ matrix.test-suite }}" - name: Publish E2E Test Flakes if: '!cancelled()' @@ -247,7 +314,18 @@ jobs: username: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} password: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_PSW }} - - name: Enable Earthly to Push Artifacts + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Earthly to Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/master' + run: echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + + - name: Configure Earthly to Push Artifacts if: env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' && env.AWS_USR != '' run: echo "EARTHLY_PUSH=true" > $GITHUB_ENV @@ -255,7 +333,7 @@ jobs: run: earthly +ci-version - name: Build and Push Artifacts - run: earthly --strict +ci-artifacts --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} + run: earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }} +ci-artifacts --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} - name: Push Artifacts to https://releases.crossplane.io/build/ if: env.AWS_USR != '' From 0a94235caeff66d1c3ac093ea55d9a97cccbc84a Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 18:38:14 -0700 Subject: [PATCH 258/370] Fix typo in Renovate configuration Signed-off-by: Nic Cope --- .github/renovate.json5 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 42873b9a3..60482327f 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -145,7 +145,7 @@ // syntax is a superset of Dockerfile syntax this works to update FROM images. // https://github.com/renovatebot/renovate/issues/15975 "dockerfile": { - "fileMatch:" [ + "fileMatch": [ "(^|/)Earthfile$" ] }, From ab417f8259769beb95ef6fd9a1740d1ec58a8652 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 18:48:04 -0700 Subject: [PATCH 259/370] Actually configure Earthly to push remote cache For both required environment variables to be set we need >>, or the second will just overwrite the first. 
Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 40ede6eb6..e8c204b13 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,8 +54,8 @@ jobs: - name: Configure Earthly to Push Cache to GitHub Container Registry if: github.ref == 'refs/heads/master' run: | - echo "EARTHLY_PUSH=true" > $GITHUB_ENV - echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + echo "EARTHLY_PUSH=true" >> $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - name: Generate Files run: earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }} +generate @@ -100,8 +100,8 @@ jobs: - name: Configure Earthly to Push Cache to GitHub Container Registry if: github.ref == 'refs/heads/master' run: | - echo "EARTHLY_PUSH=true" > $GITHUB_ENV - echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + echo "EARTHLY_PUSH=true" >> $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - name: Lint run: earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }} +lint @@ -136,8 +136,8 @@ jobs: - name: Configure Earthly to Push Cache to GitHub Container Registry if: github.ref == 'refs/heads/master' run: | - echo "EARTHLY_PUSH=true" > $GITHUB_ENV - echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + echo "EARTHLY_PUSH=true" >> $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - name: Run CodeQL run: earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }} +ci-codeql @@ -200,8 +200,8 @@ jobs: - name: Configure Earthly to Push Cache to GitHub Container Registry if: github.ref == 'refs/heads/master' run: | - echo "EARTHLY_PUSH=true" > $GITHUB_ENV - echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + echo "EARTHLY_PUSH=true" >> $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - name: Run Unit Tests run: earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }} +test @@ -252,8 +252,8 @@ jobs: - name: Configure Earthly to Push Cache to GitHub Container Registry if: github.ref == 'refs/heads/master' run: | - echo "EARTHLY_PUSH=true" > $GITHUB_ENV - echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + echo "EARTHLY_PUSH=true" >> $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - name: Run E2E Tests run: | @@ -323,11 +323,11 @@ jobs: - name: Configure Earthly to Push Cache to GitHub Container Registry if: github.ref == 'refs/heads/master' - run: echo "EARTHLY_MAX_REMOTE_CACHE=true" > $GITHUB_ENV + run: echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - name: Configure Earthly to Push Artifacts if: env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' && env.AWS_USR != '' - run: echo "EARTHLY_PUSH=true" > $GITHUB_ENV + run: echo "EARTHLY_PUSH=true" >> $GITHUB_ENV - name: Set CROSSPLANE_VERSION GitHub Environment Variable run: earthly +ci-version From 77daa7e60cbef08a82e81d74e882a919eba25829 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 18:54:58 -0700 Subject: [PATCH 260/370] Add back the --only-show-errors aws s3 flag I dropped this thinking a little more detail wouldn't hurt. Turned out it was there for a reason - there's so much output the CI job logs get truncated. 
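Illustrative only - the flag suppresses the per-object progress lines that
aws s3 sync normally prints:

    # One "upload: ..." line per object; thousands of lines for a full sync.
    aws s3 sync --delete artifacts s3://example-bucket/build/...

    # Quiet unless something actually fails.
    aws s3 sync --delete --only-show-errors artifacts s3://example-bucket/build/...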
Signed-off-by: Nic Cope --- Earthfile | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Earthfile b/Earthfile index bd6313f5a..8acf4019e 100644 --- a/Earthfile +++ b/Earthfile @@ -399,7 +399,7 @@ ci-push-build-artifacts: ARG AWS_DEFAULT_REGION FROM amazon/aws-cli:2.15.57 COPY --dir ${ARTIFACTS_DIR} artifacts - RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete artifacts s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors artifacts s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} # ci-promote-build-artifacts is used by CI to promote binary artifacts and Helm # charts to a channel. In practice, this means copying them from one S3 @@ -415,12 +415,12 @@ ci-promote-build-artifacts: ARG AWS_DEFAULT_REGION FROM amazon/aws-cli:2.15.57 COPY +helm-setup/helm /usr/local/bin/helm - RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync s3://${BUCKET_CHARTS}/${CHANNEL} repo - RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION}/charts repo + RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_CHARTS}/${CHANNEL} repo + RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION}/charts repo RUN helm repo index --url ${HELM_REPO_URL} repo - RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete repo s3://${BUCKET_CHARTS}/${CHANNEL} - RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 cp "private, max-age=0, no-transform" repo/index.yaml s3://${BUCKET_CHARTS}/${CHANNEL}/index.yaml - RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/${CROSSPLANE_VERSION} + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors repo s3://${BUCKET_CHARTS}/${CHANNEL} + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 cp --only-show-errors "private, max-age=0, no-transform" repo/index.yaml s3://${BUCKET_CHARTS}/${CHANNEL}/index.yaml + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/${CROSSPLANE_VERSION} IF [ "${PRERELEASE}" = "false" ] - RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/current + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/current END From 8c71af3c91871e374018888731f4991df6ebf4ae Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Wed, 29 May 2024 02:16:26 +0000 Subject: [PATCH 261/370] chore(deps): pin dependencies --- .github/workflows/ci.yml | 18 +++++++++--------- 1 file 
changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e8c204b13..f6005722a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,7 +45,7 @@ jobs: password: ${{ secrets.DOCKER_PSW }} - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 with: registry: ghcr.io username: ${{ github.actor }} @@ -66,7 +66,7 @@ jobs: - name: Fail if Files Changed if: steps.changed_files.outputs.count != 0 - uses: actions/github-script@v7 + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7 with: script: core.setFailed('Found changed files after running earthly +generate.'') @@ -91,7 +91,7 @@ jobs: password: ${{ secrets.DOCKER_PSW }} - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 with: registry: ghcr.io username: ${{ github.actor }} @@ -127,7 +127,7 @@ jobs: password: ${{ secrets.DOCKER_PSW }} - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 with: registry: ghcr.io username: ${{ github.actor }} @@ -143,7 +143,7 @@ jobs: run: earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }} +ci-codeql - name: Upload CodeQL Results to GitHub - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@9fdb3e49720b44c48891d036bb502feb25684276 # v3 with: sarif_file: '_output/codeql/go.sarif' @@ -166,7 +166,7 @@ jobs: output: 'trivy-results.sarif' - name: Upload Trivy Results to GitHub - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@9fdb3e49720b44c48891d036bb502feb25684276 # v3 with: sarif_file: 'trivy-results.sarif' @@ -191,7 +191,7 @@ jobs: password: ${{ secrets.DOCKER_PSW }} - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 with: registry: ghcr.io username: ${{ github.actor }} @@ -243,7 +243,7 @@ jobs: password: ${{ secrets.DOCKER_PSW }} - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 with: registry: ghcr.io username: ${{ github.actor }} @@ -315,7 +315,7 @@ jobs: password: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_PSW }} - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 with: registry: ghcr.io username: ${{ github.actor }} From 88ab861f97863b1ac5bbf929050e2d15158449fa Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Wed, 29 May 2024 02:16:31 +0000 Subject: [PATCH 262/370] chore(deps): update docker/login-action digest to 0d4c9c5 --- .github/workflows/ci.yml | 14 +++++++------- .github/workflows/scan.yaml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e8c204b13..fed06c5cc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -38,7 +38,7 @@ jobs: version: ${{ env.EARTHLY_VERSION }} - name: Login to DockerHub - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 if: env.DOCKER_USR != '' with: 
username: ${{ secrets.DOCKER_USR }} @@ -84,7 +84,7 @@ jobs: version: ${{ env.EARTHLY_VERSION }} - name: Login to DockerHub - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 if: env.DOCKER_USR != '' with: username: ${{ secrets.DOCKER_USR }} @@ -120,7 +120,7 @@ jobs: version: ${{ env.EARTHLY_VERSION }} - name: Login to DockerHub - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 if: env.DOCKER_USR != '' with: username: ${{ secrets.DOCKER_USR }} @@ -184,7 +184,7 @@ jobs: version: ${{ env.EARTHLY_VERSION }} - name: Login to DockerHub - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 if: env.DOCKER_USR != '' with: username: ${{ secrets.DOCKER_USR }} @@ -236,7 +236,7 @@ jobs: version: ${{ env.EARTHLY_VERSION }} - name: Login to DockerHub - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 if: env.DOCKER_USR != '' with: username: ${{ secrets.DOCKER_USR }} @@ -300,14 +300,14 @@ jobs: version: ${{ env.EARTHLY_VERSION }} - name: Login to DockerHub - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 if: env.DOCKER_USR != '' with: username: ${{ secrets.DOCKER_USR }} password: ${{ secrets.DOCKER_PSW }} - name: Login to Upbound - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 if: env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' with: registry: xpkg.upbound.io diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index 797f2e6a0..5c7bb36d3 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -99,7 +99,7 @@ jobs: # we log to DockerHub to avoid rate limiting - name: Login To DockerHub - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 if: env.DOCKER_USR != '' with: username: ${{ secrets.DOCKER_USR }} From 600f2abccd87fbe35684e1a43c28651d2c82cf20 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 19:36:28 -0700 Subject: [PATCH 263/370] Set --cache-control flag when copying Helm index This was lost in translation from Make -> Earthly. 
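Concretely (a sketch matching the one-line fix below): the flag name was
dropped in the port, leaving its value behind as a stray positional
argument. With the flag restored, the chart index is uploaded with
Cache-Control metadata so clients don't hold on to a stale index:

    aws s3 cp --cache-control "private, max-age=0, no-transform" \
      repo/index.yaml s3://example-bucket/channel/index.yaml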
Signed-off-by: Nic Cope --- Earthfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Earthfile b/Earthfile index 8acf4019e..d8f0b27dc 100644 --- a/Earthfile +++ b/Earthfile @@ -419,7 +419,7 @@ ci-promote-build-artifacts: RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION}/charts repo RUN helm repo index --url ${HELM_REPO_URL} repo RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors repo s3://${BUCKET_CHARTS}/${CHANNEL} - RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 cp --only-show-errors "private, max-age=0, no-transform" repo/index.yaml s3://${BUCKET_CHARTS}/${CHANNEL}/index.yaml + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 cp --only-show-errors --cache-control "private, max-age=0, no-transform" repo/index.yaml s3://${BUCKET_CHARTS}/${CHANNEL}/index.yaml RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/${CROSSPLANE_VERSION} IF [ "${PRERELEASE}" = "false" ] RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/current From 2d9950897ba362594ae359d2c0b2124c95527c3e Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 20:44:24 -0700 Subject: [PATCH 264/370] Fix Earthfile Renovate version matching and extraction Signed-off-by: Nic Cope --- .github/renovate.json5 | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 60482327f..700c156de 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -41,7 +41,8 @@ "EARTHLY_VERSION '(?.*?)'\\n" ], "datasourceTemplate": "github-tags", - "depNameTemplate": "earthly/earthly" + "depNameTemplate": "earthly/earthly", + "extractVersionTemplate": "^v(?.*)$" }, { "customType": "regex", @@ -62,11 +63,10 @@ "^Earthfile$" ], "matchStrings": [ - "ARG GOLANGCI_LINT_VERSION=v(?.*?)\\n" + "ARG GOLANGCI_LINT_VERSION=(?.*?)\\n" ], "datasourceTemplate": "github-tags", - "depNameTemplate": "golangci/golangci-lint", - "extractVersionTemplate": "^v(?.*)$" + "depNameTemplate": "golangci/golangci-lint" }, { "customType": "regex", @@ -75,7 +75,7 @@ "^Earthfile$" ], "matchStrings": [ - "ARG HELM_VERSION=v(?.*?)\\n" + "ARG HELM_VERSION=(?.*?)\\n" ], "datasourceTemplate": "github-tags", "depNameTemplate": "helm/helm", @@ -91,6 +91,7 @@ ], "datasourceTemplate": "github-tags", "depNameTemplate": "norwoodj/helm-docs", + "extractVersionTemplate": "^v(?.*)$" }, { "customType": "regex", @@ -99,7 +100,7 @@ "^Earthfile$" ], "matchStrings": [ - "ARG KIND_VERSION=v(?.*?)\\n" + "ARG KIND_VERSION=(?.*?)\\n" ], "datasourceTemplate": "github-tags", "depNameTemplate": "kubernetes-sigs/kind", @@ -111,7 +112,7 @@ "^Earthfile$" ], "matchStrings": [ - "ARG KUBECTL_VERSION=v(?.*?)\\n" + "ARG KUBECTL_VERSION=(?.*?)\\n" ], "datasourceTemplate": "github-tags", "depNameTemplate": "kubernetes/kubernetes", @@ -127,6 +128,7 @@ ], "datasourceTemplate": "github-tags", "depNameTemplate": "gotestyourself/gotestsum", + "extractVersionTemplate": "^v(?.*)$" }, { "customType": "regex", @@ -135,7 +137,7 @@ "^Earthfile$" ], 
"matchStrings": [ - "ARG CODEQL_VERSION=v(?.*?)\\n" + "ARG CODEQL_VERSION=(?.*?)\\n" ], "datasourceTemplate": "github-tags", "depNameTemplate": "github/codeql-action", From ed0fb98cc8d19a96229caf460f2b0950d583438f Mon Sep 17 00:00:00 2001 From: NeerajNagure <101815703+NeerajNagure@users.noreply.github.com> Date: Wed, 29 May 2024 09:48:01 +0530 Subject: [PATCH 265/370] Enforced immutable fields using CEL rules (#5682) Enforce immutable fields using CEL rules Signed-off-by: Neeraj Nagure --- .../v1/composition_revision_types.go | 4 +-- apis/apiextensions/v1/composition_types.go | 2 +- apis/apiextensions/v1/xrd_types.go | 8 ++--- ...zz_generated.composition_revision_types.go | 4 +-- ...plane.io_compositeresourcedefinitions.yaml | 12 +++++++ ...ns.crossplane.io_compositionrevisions.yaml | 12 +++++++ ...extensions.crossplane.io_compositions.yaml | 3 ++ test/e2e/comp_schema_validation_test.go | 5 +++ .../composition-invalid-immutable.yaml | 36 +++++++++++++++++++ .../xrd/validation/xrd-immutable-updated.yaml | 31 ++++++++++++++++ test/e2e/xrd_validation_test.go | 6 ++++ 11 files changed, 114 insertions(+), 9 deletions(-) create mode 100644 test/e2e/manifests/apiextensions/composition/validation/composition-invalid-immutable.yaml create mode 100644 test/e2e/manifests/apiextensions/xrd/validation/xrd-immutable-updated.yaml diff --git a/apis/apiextensions/v1/composition_revision_types.go b/apis/apiextensions/v1/composition_revision_types.go index 7b098762d..19c4d5287 100644 --- a/apis/apiextensions/v1/composition_revision_types.go +++ b/apis/apiextensions/v1/composition_revision_types.go @@ -38,7 +38,7 @@ const ( type CompositionRevisionSpec struct { // CompositeTypeRef specifies the type of composite resource that this // composition is compatible with. - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" CompositeTypeRef TypeReference `json:"compositeTypeRef"` // Mode controls what type or "mode" of Composition will be used. @@ -118,7 +118,7 @@ type CompositionRevisionSpec struct { PublishConnectionDetailsWithStoreConfigRef *StoreConfigReference `json:"publishConnectionDetailsWithStoreConfigRef,omitempty"` // Revision number. Newer revisions have larger numbers. - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" Revision int64 `json:"revision"` } diff --git a/apis/apiextensions/v1/composition_types.go b/apis/apiextensions/v1/composition_types.go index 395dce165..5cc6d8960 100644 --- a/apis/apiextensions/v1/composition_types.go +++ b/apis/apiextensions/v1/composition_types.go @@ -24,7 +24,7 @@ import ( type CompositionSpec struct { // CompositeTypeRef specifies the type of composite resource that this // composition is compatible with. - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" CompositeTypeRef TypeReference `json:"compositeTypeRef"` // Mode controls what type or "mode" of Composition will be used. diff --git a/apis/apiextensions/v1/xrd_types.go b/apis/apiextensions/v1/xrd_types.go index ce4f0c61b..1b7477973 100644 --- a/apis/apiextensions/v1/xrd_types.go +++ b/apis/apiextensions/v1/xrd_types.go @@ -30,12 +30,12 @@ type CompositeResourceDefinitionSpec struct { // Group specifies the API group of the defined composite resource. // Composite resources are served under `/apis//...`. Must match the // name of the XRD (in the form `.`). 
- // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" Group string `json:"group"` // Names specifies the resource and kind names of the defined composite // resource. - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" Names extv1.CustomResourceDefinitionNames `json:"names"` // ClaimNames specifies the names of an optional composite resource claim. @@ -46,8 +46,8 @@ type CompositeResourceDefinitionSpec struct { // create, update, or delete a corresponding composite resource. You may add // claim names to an existing CompositeResourceDefinition, but they cannot // be changed or removed once they have been set. - // +immutable // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" ClaimNames *extv1.CustomResourceDefinitionNames `json:"claimNames,omitempty"` // ConnectionSecretKeys is the list of keys that will be exposed to the end @@ -70,7 +70,7 @@ type CompositeResourceDefinitionSpec struct { // EnforcedCompositionRef refers to the Composition resource that will be used // by all composite instances whose schema is defined by this definition. // +optional - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" EnforcedCompositionRef *CompositionReference `json:"enforcedCompositionRef,omitempty"` // DefaultCompositionUpdatePolicy is the policy used when updating composites after a new diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go index 84c24172f..b126ab016 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go @@ -40,7 +40,7 @@ const ( type CompositionRevisionSpec struct { // CompositeTypeRef specifies the type of composite resource that this // composition is compatible with. - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" CompositeTypeRef TypeReference `json:"compositeTypeRef"` // Mode controls what type or "mode" of Composition will be used. @@ -120,7 +120,7 @@ type CompositionRevisionSpec struct { PublishConnectionDetailsWithStoreConfigRef *StoreConfigReference `json:"publishConnectionDetailsWithStoreConfigRef,omitempty"` // Revision number. Newer revisions have larger numbers. - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" Revision int64 `json:"revision"` } diff --git a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml index 0baefa7b8..b71780bd2 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml @@ -114,6 +114,9 @@ spec: - kind - plural type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf connectionSecretKeys: description: |- ConnectionSecretKeys is the list of keys that will be exposed to the end @@ -272,12 +275,18 @@ spec: required: - name type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf group: description: |- Group specifies the API group of the defined composite resource. Composite resources are served under `/apis//...`. 
Must match the name of the XRD (in the form `.`). type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf metadata: description: Metadata specifies the desired metadata for the defined composite resource and claim CRD's. @@ -350,6 +359,9 @@ spec: - kind - plural type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf versions: description: |- Versions is the list of all API versions of the defined composite diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index cb60d3324..937ed7a48 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -79,6 +79,9 @@ spec: - apiVersion - kind type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf environment: description: |- Environment configures the environment in which resources are rendered. @@ -1579,6 +1582,9 @@ spec: description: Revision number. Newer revisions have larger numbers. format: int64 type: integer + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf writeConnectionSecretsToNamespace: description: |- WriteConnectionSecretsToNamespace specifies the namespace in which the @@ -1712,6 +1718,9 @@ spec: - apiVersion - kind type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf environment: description: |- Environment configures the environment in which resources are rendered. @@ -3212,6 +3221,9 @@ spec: description: Revision number. Newer revisions have larger numbers. format: int64 type: integer + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf writeConnectionSecretsToNamespace: description: |- WriteConnectionSecretsToNamespace specifies the namespace in which the diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index f38e0660c..0cd320c31 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -74,6 +74,9 @@ spec: - apiVersion - kind type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf environment: description: |- Environment configures the environment in which resources are rendered. diff --git a/test/e2e/comp_schema_validation_test.go b/test/e2e/comp_schema_validation_test.go index 96601fb06..48cbaa540 100644 --- a/test/e2e/comp_schema_validation_test.go +++ b/test/e2e/comp_schema_validation_test.go @@ -47,6 +47,11 @@ func TestCompositionValidation(t *testing.T) { funcs.ResourcesCreatedWithin(30*time.Second, manifests, "composition-warn-valid.yaml"), ), }, + { + // A composition that updates immutable fields should be rejected when validated in strict mode. + Name: "ImmutableCompositionFieldUpdateIsRejectedStrictMode", + Assessment: funcs.ResourcesFailToApply(FieldManager, manifests, "composition-invalid-immutable.yaml"), + }, } environment.Test(t, cases.Build(t.Name()). 
diff --git a/test/e2e/manifests/apiextensions/composition/validation/composition-invalid-immutable.yaml b/test/e2e/manifests/apiextensions/composition/validation/composition-invalid-immutable.yaml new file mode 100644 index 000000000..1b0e46ffb --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/validation/composition-invalid-immutable.yaml @@ -0,0 +1,36 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: valid + annotations: + crossplane.io/composition-schema-aware-validation-mode: strict +spec: + compositeTypeRef: + apiVersion: nop.example.org/v1alpha1 + kind: NopResource # <-- invalid, field is immutable + resources: + - name: nop-resource-1 + base: + apiVersion: nop.crossplane.io/v1alpha1 + kind: NopResource + spec: + forProvider: + conditionAfter: + - conditionType: Ready + conditionStatus: "False" + time: 0s + - conditionType: Ready + conditionStatus: "True" + time: 1s + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.coolField + toFieldPath: metadata.annotations[cool-field] + transforms: + - type: string + string: + type: Convert + convert: ToUpper + - type: ToCompositeFieldPath + fromFieldPath: metadata.annotations[cool-field] + toFieldPath: status.coolerField \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/xrd/validation/xrd-immutable-updated.yaml b/test/e2e/manifests/apiextensions/xrd/validation/xrd-immutable-updated.yaml new file mode 100644 index 000000000..cdaab70dd --- /dev/null +++ b/test/e2e/manifests/apiextensions/xrd/validation/xrd-immutable-updated.yaml @@ -0,0 +1,31 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xnopresources.nop.example.org +spec: + group: nope.example.org # <-- invalid, field is immutable + names: + kind: XNopResource + plural: xnopresources + claimNames: + kind: NopResource + plural: nopresources + connectionSecretKeys: + - test + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + coolField: + type: string + coolerField: + type: string + required: + - coolField \ No newline at end of file diff --git a/test/e2e/xrd_validation_test.go b/test/e2e/xrd_validation_test.go index 952dde1a1..55c2e41c5 100644 --- a/test/e2e/xrd_validation_test.go +++ b/test/e2e/xrd_validation_test.go @@ -41,6 +41,12 @@ func TestXRDValidation(t *testing.T) { Description: "An invalid update to an XRD should be rejected.", Assessment: funcs.ResourcesFailToApply(FieldManager, manifests, "xrd-valid-updated-invalid.yaml"), }, + { + // An update to immutable XRD fields should be rejected. + Name: "ImmutableXRDFieldUpdateIsRejected", + Description: "An update to immutable XRD field should be rejected.", + Assessment: funcs.ResourcesFailToApply(FieldManager, manifests, "xrd-immutable-updated.yaml"), + }, { // An invalid XRD should be rejected. 
Name: "InvalidXRDIsRejected", From 1cb5caa9301f01de4b9373829c864167e8d58968 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 21:38:47 -0700 Subject: [PATCH 266/370] Have Renovate run earthly, not make Signed-off-by: Nic Cope --- .github/renovate.json5 | 7 ++----- .github/workflows/renovate.yml | 11 +++++++++-- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 700c156de..256a90c62 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -172,9 +172,7 @@ postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. "commands": [ - "git submodule update --init", - "install-tool golang $(grep -oP \"^toolchain go\\K.+\" go.mod)", - "make generate", + "earthly --strict +go-generate", ], fileFilters: [ "**/*" @@ -190,8 +188,7 @@ postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. "commands": [ - "install-tool golang $(grep -oP \"^toolchain go\\K.+\" go.mod)", - "make go.lint", + "earthly --strict +go-lint", ], fileFilters: [ "**/*" diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index c19c77628..7316734d2 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -12,6 +12,9 @@ on: - cron: '0 8 * * *' env: + # Common versions + EARTHLY_VERSION: '0.8.11' + LOG_LEVEL: "info" jobs: @@ -23,8 +26,12 @@ jobs: steps: - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - submodules: true + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} # Don't waste time starting Renovate if JSON is invalid - name: Validate Renovate JSON @@ -44,7 +51,7 @@ jobs: # Use GitHub API to create commits RENOVATE_PLATFORM_COMMIT: "true" LOG_LEVEL: ${{ github.event.inputs.logLevel || env.LOG_LEVEL }} - RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^git submodule update --init$", "^make generate$", "^install-tool golang \\$\\(grep -oP \"\\^toolchain go\\\\K\\.\\+\" go\\.mod\\)$", "^make go.lint$"]' + RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^earthly .+"]' with: configurationFile: .github/renovate.json5 token: '${{ steps.get-github-app-token.outputs.token }}' From 9ad8dac368964b8b90f74c75775594aa42f2ab7b Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 May 2024 22:19:03 -0700 Subject: [PATCH 267/370] Don't re-run CodeQL or E2E tests if nothing but the git commit changes Signed-off-by: Nic Cope --- Earthfile | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/Earthfile b/Earthfile index d8f0b27dc..1507777b6 100644 --- a/Earthfile +++ b/Earthfile @@ -53,7 +53,10 @@ e2e: COPY +gotestsum-setup/gotestsum /usr/local/bin/gotestsum COPY +go-build-e2e/e2e . COPY --dir cluster test . - WITH DOCKER --load crossplane-e2e/crossplane:latest=+image + # Using a static CROSSPLANE_VERSION allows Earthly to cache E2E runs as long + # as no code changed. If the version contains a git commit (the default) the + # build layer cache is invalidated on every commit. + WITH DOCKER --load crossplane-e2e/crossplane:latest=(+image --CROSSPLANE_VERSION=v0.0.0-e2e) TRY # TODO(negz:) Set GITHUB_ACTIONS=true and use RUN --raw-output when # https://github.com/earthly/earthly/issues/4143 is fixed. 
@@ -357,7 +360,10 @@ ci-codeql: ARG CGO_ENABLED=0 ARG TARGETOS ARG TARGETARCH - FROM +go-modules + # Using a static CROSSPLANE_VERSION allows Earthly to cache E2E runs as long + # as no code changed. If the version contains a git commit (the default) the + # build layer cache is invalidated on every commit. + FROM +go-modules --CROSSPLANE_VERSION=v0.0.0-codeql IF [ "${TARGETARCH}" = "arm64" ] && [ "${TARGETOS}" = "linux" ] RUN --no-cache echo "CodeQL doesn't support Linux on Apple Silicon" && false END From 545dab61e23c4689154bb9ab99bf4a063e8a2114 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Wed, 29 May 2024 08:09:59 +0000 Subject: [PATCH 268/370] chore(deps): update dependency helm/helm to v3.15.1 --- Earthfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Earthfile b/Earthfile index d8f0b27dc..862fac9c0 100644 --- a/Earthfile +++ b/Earthfile @@ -313,7 +313,7 @@ helm-docs-setup: # helm-setup is used by other targets to setup helm. helm-setup: - ARG HELM_VERSION=v3.14.4 + ARG HELM_VERSION=v3.15.1 ARG NATIVEPLATFORM ARG TARGETOS ARG TARGETARCH From fa21fe32cb3be8ecc0a2f43c0761e3b8491a93ee Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 29 May 2024 11:55:27 -0700 Subject: [PATCH 269/370] TRY WITH DOCKER, not WITH DOCKER TRY Right now the order is important. See https://github.com/earthly/earthly/issues/4177 Signed-off-by: Nic Cope --- Earthfile | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Earthfile b/Earthfile index 1507777b6..9941838b2 100644 --- a/Earthfile +++ b/Earthfile @@ -53,17 +53,17 @@ e2e: COPY +gotestsum-setup/gotestsum /usr/local/bin/gotestsum COPY +go-build-e2e/e2e . COPY --dir cluster test . - # Using a static CROSSPLANE_VERSION allows Earthly to cache E2E runs as long - # as no code changed. If the version contains a git commit (the default) the - # build layer cache is invalidated on every commit. - WITH DOCKER --load crossplane-e2e/crossplane:latest=(+image --CROSSPLANE_VERSION=v0.0.0-e2e) - TRY + TRY + # Using a static CROSSPLANE_VERSION allows Earthly to cache E2E runs as long + # as no code changed. If the version contains a git commit (the default) the + # build layer cache is invalidated on every commit. + WITH DOCKER --load crossplane-e2e/crossplane:latest=(+image --CROSSPLANE_VERSION=v0.0.0-e2e) # TODO(negz:) Set GITHUB_ACTIONS=true and use RUN --raw-output when # https://github.com/earthly/earthly/issues/4143 is fixed. RUN gotestsum --no-color=false --format testname --junitfile e2e-tests.xml --raw-command go tool test2json -t -p E2E ./e2e -test.v ${FLAGS} - FINALLY - SAVE ARTIFACT --if-exists e2e-tests.xml AS LOCAL _output/tests/e2e-tests.xml END + FINALLY + SAVE ARTIFACT --if-exists e2e-tests.xml AS LOCAL _output/tests/e2e-tests.xml END # hack builds Crossplane, and deploys it to a kind cluster. It runs in your From 2628e27aedb012a9627e6ecdff4169a41926fd3a Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 29 May 2024 12:52:18 -0700 Subject: [PATCH 270/370] Only match github/codeql-action tags starting with codeql-bundle-.+ The repo has two forms of tag, and this is the one we're interested in. 
Signed-off-by: Nic Cope --- .github/renovate.json5 | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 256a90c62..19c415614 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -141,6 +141,7 @@ ], "datasourceTemplate": "github-tags", "depNameTemplate": "github/codeql-action", + "extractVersionTemplate": "^codeql-bundle-(?.*)$" }, ], // Renovate doesn't have native Earthfile support, but because Earthfile From c9ce9752486fea9de742007b22dcc7769d0a5cd7 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 29 May 2024 15:01:39 -0700 Subject: [PATCH 271/370] Bump golangci-lint to v1.59.0 Signed-off-by: Nic Cope --- .golangci.yml | 10 ---------- Earthfile | 2 +- cmd/crank/beta/render/cmd.go | 10 +++++----- cmd/crank/beta/top/top.go | 12 ++++++------ cmd/crank/version/version.go | 4 ++-- cmd/crank/xpkg/login.go | 16 ++++++++++------ cmd/crossplane/main.go | 2 +- 7 files changed, 25 insertions(+), 31 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 09df62272..90f918be7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -14,17 +14,7 @@ linters: disable: # These linters are all deprecated. We disable them explicitly to avoid the # linter logging deprecation warnings. - - deadcode - execinquery - - varcheck - - scopelint - - structcheck - - interfacer - - exhaustivestruct - - ifshort - - golint - - maligned - - nosnakecase # These are linters we'd like to enable, but that will be labor intensive to # make existing code compliant. diff --git a/Earthfile b/Earthfile index f2d1c511d..2c14638dc 100644 --- a/Earthfile +++ b/Earthfile @@ -191,7 +191,7 @@ go-test: # go-lint lints Go code. go-lint: - ARG GOLANGCI_LINT_VERSION=v1.58.2 + ARG GOLANGCI_LINT_VERSION=v1.59.0 FROM +go-modules # This cache is private because golangci-lint doesn't support concurrent runs. 
CACHE --id go-lint --sharing private /root/.cache/golangci-lint diff --git a/cmd/crank/beta/render/cmd.go b/cmd/crank/beta/render/cmd.go index 841f49062..c11e51c80 100644 --- a/cmd/crank/beta/render/cmd.go +++ b/cmd/crank/beta/render/cmd.go @@ -134,7 +134,7 @@ func (c *Cmd) Run(k *kong.Context, log logging.Logger) error { //nolint:gocognit warns, errs := comp.Validate() for _, warn := range warns { - fmt.Fprintf(k.Stderr, "WARN(composition): %s\n", warn) + _, _ = fmt.Fprintf(k.Stderr, "WARN(composition): %s\n", warn) } if len(errs) > 0 { return errors.Wrapf(errs.ToAggregate(), "invalid Composition %q", comp.GetName()) @@ -221,13 +221,13 @@ func (c *Cmd) Run(k *kong.Context, log logging.Logger) error { //nolint:gocognit } } - fmt.Fprintln(k.Stdout, "---") + _, _ = fmt.Fprintln(k.Stdout, "---") if err := s.Encode(out.CompositeResource, os.Stdout); err != nil { return errors.Wrapf(err, "cannot marshal composite resource %q to YAML", xr.GetName()) } for i := range out.ComposedResources { - fmt.Fprintln(k.Stdout, "---") + _, _ = fmt.Fprintln(k.Stdout, "---") if err := s.Encode(&out.ComposedResources[i], os.Stdout); err != nil { return errors.Wrapf(err, "cannot marshal composed resource %q to YAML", out.ComposedResources[i].GetAnnotations()[AnnotationKeyCompositionResourceName]) } @@ -235,7 +235,7 @@ func (c *Cmd) Run(k *kong.Context, log logging.Logger) error { //nolint:gocognit if c.IncludeFunctionResults { for i := range out.Results { - fmt.Fprintln(k.Stdout, "---") + _, _ = fmt.Fprintln(k.Stdout, "---") if err := s.Encode(&out.Results[i], os.Stdout); err != nil { return errors.Wrap(err, "cannot marshal result to YAML") } @@ -243,7 +243,7 @@ func (c *Cmd) Run(k *kong.Context, log logging.Logger) error { //nolint:gocognit } if c.IncludeContext { - fmt.Fprintln(k.Stdout, "---") + _, _ = fmt.Fprintln(k.Stdout, "---") if err := s.Encode(out.Context, os.Stdout); err != nil { return errors.Wrap(err, "cannot marshal context to YAML") } diff --git a/cmd/crank/beta/top/top.go b/cmd/crank/beta/top/top.go index 60a224beb..47c375df5 100644 --- a/cmd/crank/beta/top/top.go +++ b/cmd/crank/beta/top/top.go @@ -138,7 +138,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { logger.Debug("Fetched all Crossplane pods", "pods", crossplanePods, "namespace", c.Namespace) if len(crossplanePods) == 0 { - fmt.Fprintln(k.Stdout, "No Crossplane pods found in the namespace", c.Namespace) + _, _ = fmt.Fprintln(k.Stdout, "No Crossplane pods found in the namespace", c.Namespace) return nil } @@ -169,7 +169,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { if c.Summary { printPodsSummary(k.Stdout, crossplanePods) logger.Debug("Printed pods summary") - fmt.Fprintln(k.Stdout) + _, _ = fmt.Fprintln(k.Stdout) } if err := printPodsTable(k.Stdout, crossplanePods); err != nil { @@ -227,7 +227,7 @@ func printPodsSummary(w io.Writer, pods []topMetrics) { } // Print summary directly to the provided writer - fmt.Fprintf(w, "Nr of Crossplane pods: %d\n", len(pods)) + _, _ = fmt.Fprintf(w, "Nr of Crossplane pods: %d\n", len(pods)) // Sort categories alphabetically to ensure consistent output categories := make([]string, 0, len(categoryCounts)) for category := range categoryCounts { @@ -235,10 +235,10 @@ func printPodsSummary(w io.Writer, pods []topMetrics) { } sort.Strings(categories) for _, category := range categories { - fmt.Fprintf(w, "%s: %d\n", capitalizeFirst(category), categoryCounts[category]) + _, _ = fmt.Fprintf(w, "%s: %d\n", capitalizeFirst(category), categoryCounts[category]) } - 
fmt.Fprintf(w, "Memory: %s\n", fmt.Sprintf("%vMi", totalMemoryUsage.Value()/(1024*1024))) - fmt.Fprintf(w, "CPU(cores): %s\n", fmt.Sprintf("%vm", totalCPUUsage.MilliValue())) + _, _ = fmt.Fprintf(w, "Memory: %s\n", fmt.Sprintf("%vMi", totalMemoryUsage.Value()/(1024*1024))) + _, _ = fmt.Fprintf(w, "CPU(cores): %s\n", fmt.Sprintf("%vm", totalCPUUsage.MilliValue())) } func getCrossplanePods(pods []v1.Pod) []topMetrics { diff --git a/cmd/crank/version/version.go b/cmd/crank/version/version.go index 7ce7e506b..13019eb95 100644 --- a/cmd/crank/version/version.go +++ b/cmd/crank/version/version.go @@ -39,7 +39,7 @@ type Cmd struct { // Run runs the version command. func (c *Cmd) Run(k *kong.Context) error { - fmt.Fprintln(k.Stdout, "Client Version: "+version.New().GetVersionString()) + _, _ = fmt.Fprintln(k.Stdout, "Client Version: "+version.New().GetVersionString()) if c.Client { return nil } @@ -52,7 +52,7 @@ func (c *Cmd) Run(k *kong.Context) error { return errors.Wrap(err, errGetCrossplaneVersion) } if vxp != "" { - fmt.Fprintln(k.Stdout, "Server Version: "+vxp) + _, _ = fmt.Fprintln(k.Stdout, "Server Version: "+vxp) } return nil diff --git a/cmd/crank/xpkg/login.go b/cmd/crank/xpkg/login.go index f51a9275d..6acc2757b 100644 --- a/cmd/crank/xpkg/login.go +++ b/cmd/crank/xpkg/login.go @@ -162,8 +162,8 @@ func (c *loginCmd) Run(k *kong.Context, upCtx *upbound.Context) error { if err := upCtx.CfgSrc.UpdateConfig(upCtx.Cfg); err != nil { return errors.Wrap(err, "failed to update config") } - fmt.Fprintln(k.Stdout, "Login successful.") - return nil + _, err = fmt.Fprintln(k.Stdout, "Login successful.") + return err } func (c *loginCmd) setupCredentials() error { @@ -204,21 +204,25 @@ func getPassword(f *os.File) (string, error) { if !term.IsTerminal(int(f.Fd())) { return "", errors.New("not a terminal") } - fmt.Fprintf(f, "Password: ") + if _, err := fmt.Fprintf(f, "Password: "); err != nil { + return "", err + } password, err := term.ReadPassword(int(f.Fd())) if err != nil { return "", err } // Print a new line because ReadPassword does not. - _, _ = fmt.Fprintf(f, "\n") - return string(password), nil + _, err = fmt.Fprintf(f, "\n") + return string(password), err } func getUsername(f *os.File) (string, error) { if !term.IsTerminal(int(f.Fd())) { return "", errors.New("not a terminal") } - fmt.Fprintf(f, "Username: ") + if _, err := fmt.Fprintf(f, "Username: "); err != nil { + return "", err + } reader := bufio.NewReader(f) s, err := reader.ReadString('\n') if err != nil { diff --git a/cmd/crossplane/main.go b/cmd/crossplane/main.go index c1a3f586e..905df688f 100644 --- a/cmd/crossplane/main.go +++ b/cmd/crossplane/main.go @@ -75,7 +75,7 @@ func (d debugFlag) BeforeApply(ctx *kong.Context) error { //nolint:unparam // Be } func (v versionFlag) BeforeApply(app *kong.Kong) error { //nolint:unparam // BeforeApply requires this signature. 
- fmt.Fprintln(app.Stdout, version.New().GetVersionString()) + _, _ = fmt.Fprintln(app.Stdout, version.New().GetVersionString()) app.Exit(0) return nil } From f747d784de4c75fbf17cd3475075f545836f2234 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 29 May 2024 16:01:22 -0700 Subject: [PATCH 272/370] Remove superfluous ' in check-diff Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0b81498c9..0704831a1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -68,7 +68,7 @@ jobs: if: steps.changed_files.outputs.count != 0 uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7 with: - script: core.setFailed('Found changed files after running earthly +generate.'') + script: core.setFailed('Found changed files after running earthly +generate.') lint: runs-on: ubuntu-22.04 From 6cf19c3c804d783be620eb107bf397223d81bcba Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 29 May 2024 18:17:12 -0700 Subject: [PATCH 273/370] Fix EARTHLY_VERSION matching in GitHub Workflows Signed-off-by: Nic Cope --- .github/renovate.json5 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 19c415614..e8e69cf92 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -38,7 +38,7 @@ "^\\.github\\/workflows\\/[^/]+\\.ya?ml$" ], "matchStrings": [ - "EARTHLY_VERSION '(?.*?)'\\n" + "EARTHLY_VERSION: '(?.*?)'\\n" ], "datasourceTemplate": "github-tags", "depNameTemplate": "earthly/earthly", From 64094bbdade2b88a2e03107793a03971d02fcd32 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 01:41:38 +0000 Subject: [PATCH 274/370] chore(deps): update dependency earthly/earthly to v0.8.13 --- .github/workflows/ci.yml | 2 +- .github/workflows/promote.yml | 2 +- .github/workflows/renovate.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0704831a1..9d2bbe5df 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,7 +10,7 @@ on: env: # Common versions - EARTHLY_VERSION: '0.8.11' + EARTHLY_VERSION: '0.8.13' # Force Earthly to use color output FORCE_COLOR: "1" diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index d1fa5881e..e211c26c5 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -20,7 +20,7 @@ on: env: # Common versions - EARTHLY_VERSION: '0.8.11' + EARTHLY_VERSION: '0.8.13' # Common users. 
We can't run a step 'if secrets.AWS_USR != ""' but we can run # a step 'if env.AWS_USR' != ""', so we copy these to succinctly test whether diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 7316734d2..b1390ff79 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -13,7 +13,7 @@ on: env: # Common versions - EARTHLY_VERSION: '0.8.11' + EARTHLY_VERSION: '0.8.13' LOG_LEVEL: "info" From ade69496440bb7a7647fb74b59f5297e9121dcb1 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 01:41:45 +0000 Subject: [PATCH 275/370] chore(deps): update dependency norwoodj/helm-docs to v1.13.1 --- Earthfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Earthfile b/Earthfile index ec98a4080..225eeac7f 100644 --- a/Earthfile +++ b/Earthfile @@ -301,7 +301,7 @@ gotestsum-setup: # helm-docs-setup is used by other targets to setup helm-docs. helm-docs-setup: - ARG HELM_DOCS_VERSION=1.11.0 + ARG HELM_DOCS_VERSION=1.13.1 ARG NATIVEPLATFORM ARG TARGETOS ARG TARGETARCH From 3c91ac884d5a8715b3981a1a3e98868e9cf869ed Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 08:09:58 +0000 Subject: [PATCH 276/370] chore(deps): update dependency gotestyourself/gotestsum to v1.12.0 --- Earthfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Earthfile b/Earthfile index 225eeac7f..af9fd4511 100644 --- a/Earthfile +++ b/Earthfile @@ -291,7 +291,7 @@ kind-setup: # gotestsum-setup is used by other targets to setup gotestsum. gotestsum-setup: - ARG GOTESTSUM_VERSION=1.11.0 + ARG GOTESTSUM_VERSION=1.12.0 ARG NATIVEPLATFORM ARG TARGETOS ARG TARGETARCH From e380e5940e4ad29711e13f9e05b8d6f9b153c153 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 29 May 2024 19:04:57 -0700 Subject: [PATCH 277/370] Have Renovate install Earthly It needs to be in the Docker container, not the runner. 
Signed-off-by: Nic Cope --- .github/renovate-entrypoint.sh | 7 +++++++ .github/workflows/renovate.yml | 9 +++------ 2 files changed, 10 insertions(+), 6 deletions(-) create mode 100755 .github/renovate-entrypoint.sh diff --git a/.github/renovate-entrypoint.sh b/.github/renovate-entrypoint.sh new file mode 100755 index 000000000..4ef46f961 --- /dev/null +++ b/.github/renovate-entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +curl -fsSLo /usr/local/bin/earthly https://github.com/earthly/earthly/releases/latest/download/earthly-linux-amd64 +chmod +x /usr/local/bin/earthly +/usr/local/bin/earthly bootstrap + +renovate diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index b1390ff79..799766699 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -27,12 +27,6 @@ jobs: - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - name: Setup Earthly - uses: earthly/actions-setup@v1 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - version: ${{ env.EARTHLY_VERSION }} - # Don't waste time starting Renovate if JSON is invalid - name: Validate Renovate JSON run: npx --yes --package renovate -- renovate-config-validator @@ -55,3 +49,6 @@ jobs: with: configurationFile: .github/renovate.json5 token: '${{ steps.get-github-app-token.outputs.token }}' + mount-docker-socket: true + docker-user: root + docker-cmd-file: .github/renovate-entrypoint.sh From 6d78352e42dae5c642f184be12c02f7b66088ca4 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 29 May 2024 19:20:39 -0700 Subject: [PATCH 278/370] Have Renovate only run earthly on master Right now master has an Earthfile and all release branches have a Makefile. Eventually they'll all arrive at one implementation or the other depending on whether we decide to adopt Earthly. Signed-off-by: Nic Cope --- .github/renovate.json5 | 46 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index e8e69cf92..429ce2ef2 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -166,10 +166,12 @@ // be at the beginning, high priority at the end "packageRules": [ { - "description": "Generate code after upgrading go dependencies", + "description": "Generate code after upgrading go dependencies (master)", "matchDatasources": [ "go" ], + // Currently we only have an Earthfile on master. + matchBaseBranches: ["master"], postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. "commands": [ @@ -182,10 +184,30 @@ }, }, { - "description": "Lint code after upgrading golangci-lint", + "description": "Generate code after upgrading go dependencies (release branch)", + "matchDatasources": [ + "go" + ], + // Currently we only have an Earthfile on master. + matchBaseBranches: ["release-.+"], + postUpgradeTasks: { + // Post-upgrade tasks that are executed before a commit is made by Renovate. + "commands": [ + "make go.generate", + ], + fileFilters: [ + "**/*" + ], + executionMode: "update", + }, + }, + { + "description": "Lint code after upgrading golangci-lint (master)", "matchDepNames": [ "golangci/golangci-lint" ], + // Currently we only have an Earthfile on master. + matchBaseBranches: ["master"], postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. 
"commands": [ @@ -197,6 +219,24 @@ executionMode: "update", }, }, + { + "description": "Lint code after upgrading golangci-lint (release branch)", + "matchDepNames": [ + "golangci/golangci-lint" + ], + // Currently we only have an Earthfile on master. + matchBaseBranches: ["release-.+"], + postUpgradeTasks: { + // Post-upgrade tasks that are executed before a commit is made by Renovate. + "commands": [ + "make go.lint", + ], + fileFilters: [ + "**/*" + ], + executionMode: "update", + }, + }, { "matchManagers": [ "crossplane" @@ -287,4 +327,4 @@ "groupName": "golang version", } ], -} \ No newline at end of file +} From f137f43e6b8a7e76e6d839eb7fda45b2b0ff7915 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 30 May 2024 14:41:13 -0700 Subject: [PATCH 279/370] Use GitHub releases, not tags, to detect new releases We download release artifacts, so releases are a better signal. With tags, it's possible a new tag is created without a corresponding release. This would be a false positive, since we wouldn't actually be able to download a release artifact. Signed-off-by: Nic Cope --- .github/renovate.json5 | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 429ce2ef2..7f592e9ef 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -40,7 +40,7 @@ "matchStrings": [ "EARTHLY_VERSION: '(?.*?)'\\n" ], - "datasourceTemplate": "github-tags", + "datasourceTemplate": "github-releases", "depNameTemplate": "earthly/earthly", "extractVersionTemplate": "^v(?.*)$" }, @@ -65,7 +65,7 @@ "matchStrings": [ "ARG GOLANGCI_LINT_VERSION=(?.*?)\\n" ], - "datasourceTemplate": "github-tags", + "datasourceTemplate": "github-releases", "depNameTemplate": "golangci/golangci-lint" }, { @@ -77,7 +77,7 @@ "matchStrings": [ "ARG HELM_VERSION=(?.*?)\\n" ], - "datasourceTemplate": "github-tags", + "datasourceTemplate": "github-releases", "depNameTemplate": "helm/helm", }, { @@ -89,7 +89,7 @@ "matchStrings": [ "ARG HELM_DOCS_VERSION=(?.*?)\\n" ], - "datasourceTemplate": "github-tags", + "datasourceTemplate": "github-releases", "depNameTemplate": "norwoodj/helm-docs", "extractVersionTemplate": "^v(?.*)$" }, @@ -102,7 +102,7 @@ "matchStrings": [ "ARG KIND_VERSION=(?.*?)\\n" ], - "datasourceTemplate": "github-tags", + "datasourceTemplate": "github-releases", "depNameTemplate": "kubernetes-sigs/kind", }, { @@ -114,7 +114,7 @@ "matchStrings": [ "ARG KUBECTL_VERSION=(?.*?)\\n" ], - "datasourceTemplate": "github-tags", + "datasourceTemplate": "github-releases", "depNameTemplate": "kubernetes/kubernetes", }, { @@ -126,7 +126,7 @@ "matchStrings": [ "ARG GOTESTSUM_VERSION=(?.*?)\\n" ], - "datasourceTemplate": "github-tags", + "datasourceTemplate": "github-releases", "depNameTemplate": "gotestyourself/gotestsum", "extractVersionTemplate": "^v(?.*)$" }, @@ -139,7 +139,7 @@ "matchStrings": [ "ARG CODEQL_VERSION=(?.*?)\\n" ], - "datasourceTemplate": "github-tags", + "datasourceTemplate": "github-releases", "depNameTemplate": "github/codeql-action", "extractVersionTemplate": "^codeql-bundle-(?.*)$" }, From df0a2f042d4565624414c5e7f2d255ea5a52990b Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 30 May 2024 14:46:35 -0700 Subject: [PATCH 280/370] Use correct URL when indexing Helm repo The URL should include the channel, e.g. 
see: https://github.com/crossplane/build/blob/b0dfb8fbe8c626e3e0f0b6efdfac926d750adf48/makelib/helm.mk#L154 Signed-off-by: Nic Cope --- Earthfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Earthfile b/Earthfile index af9fd4511..3e635659a 100644 --- a/Earthfile +++ b/Earthfile @@ -423,7 +423,7 @@ ci-promote-build-artifacts: COPY +helm-setup/helm /usr/local/bin/helm RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_CHARTS}/${CHANNEL} repo RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION}/charts repo - RUN helm repo index --url ${HELM_REPO_URL} repo + RUN helm repo index --url ${HELM_REPO_URL}/${CHANNEL} repo RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors repo s3://${BUCKET_CHARTS}/${CHANNEL} RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 cp --only-show-errors --cache-control "private, max-age=0, no-transform" repo/index.yaml s3://${BUCKET_CHARTS}/${CHANNEL}/index.yaml RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/${CROSSPLANE_VERSION} From 8394321671dcfbb56262ed3eaa276b706e9a46a2 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 30 May 2024 15:19:00 -0700 Subject: [PATCH 281/370] USE GITHUB_REF, not EARTHLY_GIT_BRANCH for S3 uploads ${GITHUB_REF##*/} somewhat confusingly resolves to 'merge' for PRs and (e.g.) 'master' for merges to a branch. I'm not sure 'merge' is what we _really_ want (as opposed to the PR number) but it's what the build submodule does, so keep compatibility with that. EARTHLY_GIT_BRANCH was working for master, but was resolving to HEAD for PR builds. 
Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 4 ++-- .github/workflows/promote.yml | 2 +- Earthfile | 12 ++++++------ 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9d2bbe5df..e3e4c9f14 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -341,7 +341,7 @@ jobs: earthly --strict \ --secret=AWS_ACCESS_KEY_ID=${{ secrets.AWS_USR }} \ --secret=AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_PSW }} \ - +ci-push-build-artifacts --AWS_DEFAULT_REGION=us-east-1 --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} + +ci-push-build-artifacts --AWS_DEFAULT_REGION=us-east-1 --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} --BUILD_DIR=${GITHUB_REF##*/} - name: Push Artifacts to https://releases.crossplane.io/master/ and https://charts.crossplane.io/master if: env.AWS_USR != '' && github.ref == 'refs/heads/master' @@ -349,7 +349,7 @@ jobs: earthly --strict \ --secret=AWS_ACCESS_KEY_ID=${{ secrets.AWS_USR }} \ --secret=AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_PSW }} \ - +ci-promote-build-artifacts --AWS_DEFAULT_REGION=us-east-1 --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} --CHANNEL=master + +ci-promote-build-artifacts --AWS_DEFAULT_REGION=us-east-1 --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} --BUILD_DIR=${GITHUB_REF##*/} --CHANNEL=master - name: Upload Artifacts to GitHub uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index e211c26c5..143d6e217 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -65,4 +65,4 @@ jobs: earthly --strict \ --secret=AWS_ACCESS_KEY_ID=${{ secrets.AWS_USR }} \ --secret=AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_PSW }} \ - +ci-promote-build-artifacts --AWS_DEFAULT_REGION=us-east-1 --CHANNEL=${{ inputs.channel }} --PRERELEASE=${{ inputs.pre-release }} --CROSSPLANE_VERSION=${{ inputs.version }} + +ci-promote-build-artifacts --AWS_DEFAULT_REGION=us-east-1 --CHANNEL=${{ inputs.channel }} --BUILD_DIR=${GITHUB_REF##*/} --PRERELEASE=${{ inputs.pre-release }} --CROSSPLANE_VERSION=${{ inputs.version }} diff --git a/Earthfile b/Earthfile index 3e635659a..eaa0e0203 100644 --- a/Earthfile +++ b/Earthfile @@ -399,22 +399,22 @@ ci-promote-image: # ci-push-build-artifacts is used by CI to push binary artifacts to S3. ci-push-build-artifacts: ARG --required CROSSPLANE_VERSION + ARG --required BUILD_DIR ARG ARTIFACTS_DIR=_output - ARG EARTHLY_GIT_BRANCH ARG BUCKET_RELEASES=crossplane.releases ARG AWS_DEFAULT_REGION FROM amazon/aws-cli:2.15.57 COPY --dir ${ARTIFACTS_DIR} artifacts - RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors artifacts s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors artifacts s3://${BUCKET_RELEASES}/build/${BUILD_DIR}/${CROSSPLANE_VERSION} # ci-promote-build-artifacts is used by CI to promote binary artifacts and Helm # charts to a channel. In practice, this means copying them from one S3 # directory to another. 
ci-promote-build-artifacts: ARG --required CROSSPLANE_VERSION + ARG --required BUILD_DIR ARG --required CHANNEL ARG HELM_REPO_URL=https://charts.crossplane.io - ARG EARTHLY_GIT_BRANCH ARG BUCKET_RELEASES=crossplane.releases ARG BUCKET_CHARTS=crossplane.charts ARG PRERELEASE=false @@ -422,11 +422,11 @@ ci-promote-build-artifacts: FROM amazon/aws-cli:2.15.57 COPY +helm-setup/helm /usr/local/bin/helm RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_CHARTS}/${CHANNEL} repo - RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION}/charts repo + RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_RELEASES}/build/${BUILD_DIR}/${CROSSPLANE_VERSION}/charts repo RUN helm repo index --url ${HELM_REPO_URL}/${CHANNEL} repo RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors repo s3://${BUCKET_CHARTS}/${CHANNEL} RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 cp --only-show-errors --cache-control "private, max-age=0, no-transform" repo/index.yaml s3://${BUCKET_CHARTS}/${CHANNEL}/index.yaml - RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/${CROSSPLANE_VERSION} + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors s3://${BUCKET_RELEASES}/build/${BUILD_DIR}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/${CROSSPLANE_VERSION} IF [ "${PRERELEASE}" = "false" ] - RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/current + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors s3://${BUCKET_RELEASES}/build/${BUILD_DIR}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/current END From 4a8ec3f871ed28c35d0535cc6bfaa337ecc9646c Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 30 May 2024 15:26:58 -0700 Subject: [PATCH 282/370] Only push Docker images for master and release branches. 
Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e3e4c9f14..82998976e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -326,7 +326,7 @@ jobs: run: echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - name: Configure Earthly to Push Artifacts - if: env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' && env.AWS_USR != '' + if: (github.ref == 'refs/head/master' || startsWith(github.ref, 'refs/head/release-')) && env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' && env.AWS_USR != '' run: echo "EARTHLY_PUSH=true" >> $GITHUB_ENV - name: Set CROSSPLANE_VERSION GitHub Environment Variable From 03363904e9789b4af2756408e8dd68cd571b2cb4 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 22:48:37 +0000 Subject: [PATCH 283/370] fix(deps): update module github.com/alecthomas/kong to v0.9.0 --- go.mod | 2 +- go.sum | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 5d4abdd50..1b7846ee8 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( dario.cat/mergo v1.0.0 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 github.com/Masterminds/semver v1.5.0 - github.com/alecthomas/kong v0.8.1 + github.com/alecthomas/kong v0.9.0 github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240520203451-fc036618ffd8 github.com/docker/docker v25.0.5+incompatible github.com/docker/go-connections v0.5.0 diff --git a/go.sum b/go.sum index a9fce6aea..cdbef5884 100644 --- a/go.sum +++ b/go.sum @@ -45,12 +45,12 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2ojoH/0= -github.com/alecthomas/assert/v2 v2.1.0/go.mod h1:b/+1DI2Q6NckYi+3mXyH3wFb8qG37K/DuK80n7WefXA= -github.com/alecthomas/kong v0.8.1 h1:acZdn3m4lLRobeh3Zi2S2EpnXTd1mOL6U7xVml+vfkY= -github.com/alecthomas/kong v0.8.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U= -github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE= -github.com/alecthomas/repr v0.1.0/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= +github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU= +github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/kong v0.9.0 h1:G5diXxc85KvoV2f0ZRVuMsi45IrBgx9zDNGNj165aPA= +github.com/alecthomas/kong v0.9.0/go.mod h1:Y47y5gKfHp1hDc7CH7OeXgLIpp+Q2m1Ni0L5s3bI8Os= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= From 
33202e915e67c087de7a20a139103e7398f8caca Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 22:49:11 +0000 Subject: [PATCH 284/370] fix(deps): update module github.com/aws/smithy-go to v1.20.2 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5d4abdd50..79227020d 100644 --- a/go.mod +++ b/go.mod @@ -121,7 +121,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.12.10 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 // indirect - github.com/aws/smithy-go v1.19.0 + github.com/aws/smithy-go v1.20.2 github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect diff --git a/go.sum b/go.sum index a9fce6aea..f89975963 100644 --- a/go.sum +++ b/go.sum @@ -87,8 +87,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5 github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 h1:2DQLAKDteoEDI8zpCzqBMaZlJuoE9iTYD0gFmXVax9E= github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 h1:G5IT+PEpFY0CDb3oITDP9tkmLrHkVD8Ny+elUmBqVYI= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7/go.mod h1:VVALgT1UESBh91dY0GprHnT1Z7mKd96VDk8qVy+bmu0= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From 2f85a6066eb5868127e45c542ae010f4fd9557f5 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 23:14:37 +0000 Subject: [PATCH 285/370] chore(deps): update amazon/aws-cli docker tag to v2.15.61 --- Earthfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Earthfile b/Earthfile index af9fd4511..6d633bfae 100644 --- a/Earthfile +++ b/Earthfile @@ -403,7 +403,7 @@ ci-push-build-artifacts: ARG EARTHLY_GIT_BRANCH ARG BUCKET_RELEASES=crossplane.releases ARG AWS_DEFAULT_REGION - FROM amazon/aws-cli:2.15.57 + FROM amazon/aws-cli:2.15.61 COPY --dir ${ARTIFACTS_DIR} artifacts RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors artifacts s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION} @@ -419,7 +419,7 @@ ci-promote-build-artifacts: ARG BUCKET_CHARTS=crossplane.charts ARG PRERELEASE=false ARG AWS_DEFAULT_REGION - FROM amazon/aws-cli:2.15.57 + FROM amazon/aws-cli:2.15.61 COPY +helm-setup/helm /usr/local/bin/helm RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_CHARTS}/${CHANNEL} repo RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_RELEASES}/build/${EARTHLY_GIT_BRANCH}/${CROSSPLANE_VERSION}/charts repo From 
dde1be1bc271c28f0e4bc3e3e5c7d3040e355bb3 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 23:16:42 +0000 Subject: [PATCH 286/370] fix(deps): update module github.com/go-git/go-git/v5 to v5.12.0 --- go.mod | 8 ++++---- go.sum | 21 ++++++++++----------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 1b7846ee8..2d38f2005 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/docker/go-connections v0.5.0 github.com/emicklei/dot v1.6.2 github.com/go-git/go-billy/v5 v5.5.0 - github.com/go-git/go-git/v5 v5.11.0 + github.com/go-git/go-git/v5 v5.12.0 github.com/golang-jwt/jwt/v5 v5.2.1 github.com/google/go-cmp v0.6.0 github.com/google/go-containerregistry v0.19.1 @@ -47,7 +47,7 @@ require ( require ( github.com/MakeNowJust/heredoc v1.0.0 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect + github.com/ProtonMail/go-crypto v1.0.0 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/blang/semver/v4 v4.0.0 // indirect @@ -77,8 +77,8 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect - github.com/sergi/go-diff v1.1.0 // indirect - github.com/skeema/knownhosts v1.2.1 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/skeema/knownhosts v1.2.2 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xlab/treeprint v1.2.0 // indirect diff --git a/go.sum b/go.sum index cdbef5884..2b9e351e9 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,8 @@ github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF0 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= +github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU= github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= github.com/alecthomas/kong v0.9.0 h1:G5diXxc85KvoV2f0ZRVuMsi45IrBgx9zDNGNj165aPA= @@ -182,8 +182,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= +github.com/gliderlabs/ssh v0.3.7 
h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= +github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -192,8 +192,8 @@ github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+ github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -381,14 +381,14 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= @@ -628,7 +628,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= From a64ff1050dd79b9ec592a88a6634c589d99cff47 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Fri, 31 May 2024 00:25:55 +0000 Subject: [PATCH 287/370] chore(deps): update dependency kubernetes-sigs/kind to v0.23.0 --- Earthfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Earthfile b/Earthfile index 921e2bf2a..5d5bf5346 100644 --- a/Earthfile +++ b/Earthfile @@ -281,7 +281,7 @@ kubectl-setup: # kind-setup is used by other targets to setup kind. kind-setup: - ARG KIND_VERSION=v0.21.0 + ARG KIND_VERSION=v0.23.0 ARG NATIVEPLATFORM ARG TARGETOS ARG TARGETARCH From de2d05329242083ab8e6e711777a11aae4a7b4f3 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 30 May 2024 17:47:26 -0700 Subject: [PATCH 288/370] Fix check for master branch to enable pushing https://github.com/crossplane/crossplane/pull/5765 Follow up to the above PR, which broke it. It should be refs/heads, not refs/head. Signed-off-by: Nic Cope --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 82998976e..e5d823c58 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -326,7 +326,7 @@ jobs: run: echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - name: Configure Earthly to Push Artifacts - if: (github.ref == 'refs/head/master' || startsWith(github.ref, 'refs/head/release-')) && env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' && env.AWS_USR != '' + if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release-')) && env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' && env.AWS_USR != '' run: echo "EARTHLY_PUSH=true" >> $GITHUB_ENV - name: Set CROSSPLANE_VERSION GitHub Environment Variable From f5acba0a5333ab7e9901fd4aee7135e5513306f9 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Fri, 31 May 2024 08:11:36 +0000 Subject: [PATCH 289/370] fix(deps): update module github.com/prometheus/client_golang to v1.19.1 --- go.mod | 5 ++--- go.sum | 10 ++++------ 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index a525a2fcf..9b2287b96 100644 --- a/go.mod +++ b/go.mod @@ -71,7 +71,6 @@ require ( github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect @@ -169,9 +168,9 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc5 // indirect - github.com/prometheus/client_golang v1.18.0 + github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.5.0 // indirect - 
github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/cobra v1.8.0 // indirect diff --git a/go.sum b/go.sum index 77b103531..f3109f562 100644 --- a/go.sum +++ b/go.sum @@ -322,8 +322,6 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= @@ -368,13 +366,13 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= From 4df8b39d3dc83b4d35ac80cc40c2dd89b1ec74e8 Mon Sep 17 00:00:00 2001 From: Neeraj Nagure Date: Fri, 31 May 2024 22:02:24 +0530 Subject: [PATCH 290/370] reported the status as deleted for the resources being deleted Signed-off-by: Neeraj Nagure --- cmd/crank/beta/trace/internal/printer/default.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/crank/beta/trace/internal/printer/default.go b/cmd/crank/beta/trace/internal/printer/default.go index 3641e9e56..2b306a479 100644 --- a/cmd/crank/beta/trace/internal/printer/default.go +++ b/cmd/crank/beta/trace/internal/printer/default.go @@ -216,6 +216,9 @@ func getResourceStatus(r *resource.Resource, name string, wide 
bool) fmt.Stringe syncedCond := r.GetCondition(xpv1.TypeSynced) var status, m string switch { + case r.Unstructured.GetDeletionTimestamp() != nil: + // Report the status as deleted if the resource is being deleted + status = "Deleted" case r.Error != nil: // if there is an error we want to show it status = "Error" From 0c90f3893cb73136095676280f988ca1298657f8 Mon Sep 17 00:00:00 2001 From: Neeraj Nagure Date: Sat, 1 Jun 2024 10:03:45 +0530 Subject: [PATCH 291/370] changed status to deleting Signed-off-by: Neeraj Nagure --- cmd/crank/beta/trace/internal/printer/default.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crank/beta/trace/internal/printer/default.go b/cmd/crank/beta/trace/internal/printer/default.go index 2b306a479..fe16c8197 100644 --- a/cmd/crank/beta/trace/internal/printer/default.go +++ b/cmd/crank/beta/trace/internal/printer/default.go @@ -218,7 +218,7 @@ func getResourceStatus(r *resource.Resource, name string, wide bool) fmt.Stringe switch { case r.Unstructured.GetDeletionTimestamp() != nil: // Report the status as deleted if the resource is being deleted - status = "Deleted" + status = "Deleting" case r.Error != nil: // if there is an error we want to show it status = "Error" From 7121dae8710ecf18ccbf996bf8fcb52abb12cc5f Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Sat, 1 Jun 2024 08:08:18 +0000 Subject: [PATCH 292/370] chore(deps): update github/codeql-action digest to f079b84 --- .github/workflows/ci.yml | 4 ++-- .github/workflows/scan.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e5d823c58..8ae5452e6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -143,7 +143,7 @@ jobs: run: earthly --strict --remote-cache ghcr.io/crossplane/earthly-cache:${{ github.job }} +ci-codeql - name: Upload CodeQL Results to GitHub - uses: github/codeql-action/upload-sarif@9fdb3e49720b44c48891d036bb502feb25684276 # v3 + uses: github/codeql-action/upload-sarif@f079b8493333aace61c81488f8bd40919487bd9f # v3 with: sarif_file: '_output/codeql/go.sarif' @@ -166,7 +166,7 @@ jobs: output: 'trivy-results.sarif' - name: Upload Trivy Results to GitHub - uses: github/codeql-action/upload-sarif@9fdb3e49720b44c48891d036bb502feb25684276 # v3 + uses: github/codeql-action/upload-sarif@f079b8493333aace61c81488f8bd40919487bd9f # v3 with: sarif_file: 'trivy-results.sarif' diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index 5c7bb36d3..745f681b8 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -124,7 +124,7 @@ jobs: retention-days: 3 - name: Upload Trivy Scan Results To GitHub Security Tab - uses: github/codeql-action/upload-sarif@9fdb3e49720b44c48891d036bb502feb25684276 # v3 + uses: github/codeql-action/upload-sarif@f079b8493333aace61c81488f8bd40919487bd9f # v3 with: sarif_file: 'trivy-results.sarif' category: ${{ matrix.image }}:${{ env.tag }} From 894da763c8ade9d82284aedeaed670f826d95680 Mon Sep 17 00:00:00 2001 From: Theo Chatzimichos Date: Sun, 9 Jun 2024 13:48:53 +0200 Subject: [PATCH 293/370] fix(Earthfile): Build also the go binaries when running +build The `+multiplatform-build` step includes `+go-multiplatform-build`, so similarly the `build` step should include `+go-build`. 
When calling the `+go-build` target, the OS/arch that is used is the native, but in some cases, eg on Docker Desktop on Mac OS X, emulation is used, thus the $NATIVEPLATFORM and $TARGETPLATFORM variables incorrectly report linux/amd64. The USERPLATFORM var though always returns the correct OS/arch of the host that is triggering the build. When calling the `+build` target locally, the expectation is to produce a binary for our own system, so that we can use/test it afterwards. Thus, this commit changes the `+build` target to call `+go-build` but for platform=$USERPLATFORM. Co-authored-by: Nic Cope Signed-off-by: Theo Chatzimichos --- Earthfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Earthfile b/Earthfile index 5d5bf5346..4fa7bf6e9 100644 --- a/Earthfile +++ b/Earthfile @@ -25,6 +25,8 @@ lint: # build builds Crossplane for your native OS and architecture. build: + ARG USERPLATFORM + BUILD --platform=$USERPLATFORM +go-build BUILD +image BUILD +helm-build From bc5cfbd442475dc57aa4468ebde117e7da7adaf0 Mon Sep 17 00:00:00 2001 From: Theo Chatzimichos Date: Mon, 10 Jun 2024 22:46:51 +0200 Subject: [PATCH 294/370] fix(Earthfile): Add .exe extension in the windows binaries This is a regression compared to the Makefile era, see for example https://releases.crossplane.io/stable/v1.16.0/bin/windows_amd64/ and also what install.sh expects. Signed-off-by: Theo Chatzimichos --- Earthfile | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/Earthfile b/Earthfile index 4fa7bf6e9..6ca83ac2b 100644 --- a/Earthfile +++ b/Earthfile @@ -154,12 +154,16 @@ go-build: ARG GOFLAGS="-ldflags=-X=github.com/crossplane/crossplane/internal/version.version=${CROSSPLANE_VERSION}" ARG CGO_ENABLED=0 FROM +go-modules + LET ext = "" + IF [ "$GOOS" = "windows" ] + SET ext = ".exe" + END CACHE --id go-build --sharing shared /root/.cache/go-build COPY --dir apis/ cmd/ internal/ pkg/ . - RUN go build -o crossplane ./cmd/crossplane - RUN go build -o crank ./cmd/crank - SAVE ARTIFACT crossplane AS LOCAL _output/bin/${GOOS}_${GOARCH}/crossplane - SAVE ARTIFACT crank AS LOCAL _output/bin/${GOOS}_${GOARCH}/crank + RUN go build -o crossplane${ext} ./cmd/crossplane + RUN go build -o crank${ext} ./cmd/crank + SAVE ARTIFACT crossplane${ext} AS LOCAL _output/bin/${GOOS}_${GOARCH}/crossplane${ext} + SAVE ARTIFACT crank${ext} AS LOCAL _output/bin/${GOOS}_${GOARCH}/crank${ext} # go-multiplatform-build builds Crossplane binaries for all supported OS # and architectures. From fa8fe894d066acbd62d60d26d62ec5d25b640c39 Mon Sep 17 00:00:00 2001 From: Blake R <85771645+blakeromano@users.noreply.github.com> Date: Thu, 20 Jun 2024 13:14:36 -0700 Subject: [PATCH 295/370] Add Topology Spread Constraints to Helm Chart Signed-off-by: Blake R <85771645+blakeromano@users.noreply.github.com> --- cluster/charts/crossplane/README.md | 2 ++ cluster/charts/crossplane/templates/deployment.yaml | 3 +++ .../charts/crossplane/templates/rbac-manager-deployment.yaml | 3 +++ cluster/charts/crossplane/values.yaml | 4 ++++ 4 files changed, 12 insertions(+) diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 2fcd5e46d..9586a3434 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -102,6 +102,7 @@ and their default values. | `rbacManager.replicas` | The number of RBAC Manager pod `replicas` to deploy. | `1` | | `rbacManager.skipAggregatedClusterRoles` | Don't install aggregated Crossplane ClusterRoles. 
| `false` | | `rbacManager.tolerations` | Add `tolerations` to the RBAC Manager pod deployment. | `[]` | +| `rbacManager.topologySpreadConstraints` | Add `topologySpreadConstraints` to the RBAC Manager pod deployment. | `[]` | | `registryCaBundleConfig.key` | The ConfigMap key containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates. | `""` | | `registryCaBundleConfig.name` | The ConfigMap name containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates. | `""` | | `replicas` | The number of Crossplane pod `replicas` to deploy. | `1` | @@ -124,6 +125,7 @@ and their default values. | `service.customAnnotations` | Configure annotations on the service object. Only enabled when webhooks.enabled = true | `{}` | | `serviceAccount.customAnnotations` | Add custom `annotations` to the Crossplane ServiceAccount. | `{}` | | `tolerations` | Add `tolerations` to the Crossplane pod deployment. | `[]` | +| `topologySpreadConstraints` | Add `topologySpreadConstraints` to the Crossplane pod deployment. | `[]` | | `webhooks.enabled` | Enable webhooks for Crossplane and installed Provider packages. | `true` | ### Command Line diff --git a/cluster/charts/crossplane/templates/deployment.yaml b/cluster/charts/crossplane/templates/deployment.yaml index 057e30e81..f42c2396d 100644 --- a/cluster/charts/crossplane/templates/deployment.yaml +++ b/cluster/charts/crossplane/templates/deployment.yaml @@ -243,6 +243,9 @@ spec: {{- if .Values.affinity }} affinity: {{ toYaml .Values.affinity | nindent 8 }} {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.topologySpreadConstraints | nindent 8 }} + {{- end }} {{- with .Values.dnsPolicy }} dnsPolicy: {{ . }} {{- end }} diff --git a/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml b/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml index aaa5a71ab..342895ebb 100644 --- a/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml +++ b/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml @@ -118,6 +118,9 @@ spec: {{- if .Values.rbacManager.tolerations }} tolerations: {{ toYaml .Values.rbacManager.tolerations | nindent 6 }} {{- end }} + {{- if .Values.rbacManager.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.rbacManager.topologySpreadConstraints | nindent 6 }} + {{- end }} {{- if .Values.rbacManager.affinity }} affinity: {{ toYaml .Values.rbacManager.affinity | nindent 8 }} {{- end }} diff --git a/cluster/charts/crossplane/values.yaml b/cluster/charts/crossplane/values.yaml index 4b88ccf60..48a36e684 100755 --- a/cluster/charts/crossplane/values.yaml +++ b/cluster/charts/crossplane/values.yaml @@ -21,6 +21,8 @@ nodeSelector: {} tolerations: [] # -- Add `affinities` to the Crossplane pod deployment. affinity: {} +# -- Add `topologySpreadConstraints` to the Crossplane pod deployment. +topologySpreadConstraints: [] # -- Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet`. hostNetwork: false @@ -89,6 +91,8 @@ rbacManager: tolerations: [] # -- Add `affinities` to the RBAC Manager pod deployment. affinity: {} + # -- Add `topologySpreadConstraints` to the RBAC Manager pod deployment. + topologySpreadConstraints: [] # -- The PriorityClass name to apply to the Crossplane and RBAC Manager pods. 
priorityClassName: "" From 735c9743b188369a5310a42548b469e1a930f61e Mon Sep 17 00:00:00 2001 From: johnathan-sq Date: Fri, 21 Jun 2024 10:54:05 +1000 Subject: [PATCH 296/370] fix(runtime): remove Docker containers after render completes Signed-off-by: johnathan-sq --- cmd/crank/beta/render/runtime_docker.go | 39 +++++++++++++------- cmd/crank/beta/render/runtime_docker_test.go | 28 +++++++++++++- 2 files changed, 52 insertions(+), 15 deletions(-) diff --git a/cmd/crank/beta/render/runtime_docker.go b/cmd/crank/beta/render/runtime_docker.go index c096e216b..9a3aea25f 100644 --- a/cmd/crank/beta/render/runtime_docker.go +++ b/cmd/crank/beta/render/runtime_docker.go @@ -56,11 +56,15 @@ const ( // container once rendering is done. AnnotationValueRuntimeDockerCleanupStop DockerCleanup = "Stop" + // AnnotationValueRuntimeDockerCleanupRemove stops and removes the + // container once rendering is done. + AnnotationValueRuntimeDockerCleanupRemove DockerCleanup = "Remove" + // AnnotationValueRuntimeDockerCleanupOrphan leaves the container running // once rendering is done. AnnotationValueRuntimeDockerCleanupOrphan DockerCleanup = "Orphan" - AnnotationValueRuntimeDockerCleanupDefault = AnnotationValueRuntimeDockerCleanupStop + AnnotationValueRuntimeDockerCleanupDefault = AnnotationValueRuntimeDockerCleanupRemove ) // AnnotationKeyRuntimeDockerPullPolicy can be added to a Function to control how its runtime @@ -90,8 +94,8 @@ type RuntimeDocker struct { // Image to run Image string - // Stop container once rendering is done - Stop bool + // Cleanup controls how the containers are handled after rendering. + Cleanup DockerCleanup // PullPolicy controls how the runtime image is pulled. PullPolicy DockerPullPolicy @@ -116,7 +120,7 @@ func GetDockerPullPolicy(fn pkgv1beta1.Function) (DockerPullPolicy, error) { // GetDockerCleanup extracts Cleanup configuration from the supplied Function. 
func GetDockerCleanup(fn pkgv1beta1.Function) (DockerCleanup, error) { switch c := DockerCleanup(fn.GetAnnotations()[AnnotationKeyRuntimeDockerCleanup]); c { - case AnnotationValueRuntimeDockerCleanupStop, AnnotationValueRuntimeDockerCleanupOrphan: + case AnnotationValueRuntimeDockerCleanupStop, AnnotationValueRuntimeDockerCleanupOrphan, AnnotationValueRuntimeDockerCleanupRemove: return c, nil case "": return AnnotationValueRuntimeDockerCleanupDefault, nil @@ -141,7 +145,7 @@ func GetRuntimeDocker(fn pkgv1beta1.Function, log logging.Logger) (*RuntimeDocke } r := &RuntimeDocker{ Image: fn.Spec.Package, - Stop: cleanup == AnnotationValueRuntimeDockerCleanupStop, + Cleanup: cleanup, PullPolicy: pullPolicy, log: log, } @@ -218,15 +222,24 @@ func (r *RuntimeDocker) Start(ctx context.Context) (RuntimeContext, error) { return RuntimeContext{}, errors.Wrap(err, "cannot start Docker container") } - stop := func(_ context.Context) error { - r.log.Debug("Container left running", "container", rsp.ID, "image", r.Image) - return nil - } - if r.Stop { - stop = func(ctx context.Context) error { - err := c.ContainerStop(ctx, rsp.ID, container.StopOptions{}) - return errors.Wrap(err, "cannot stop Docker container") + stop := func(ctx context.Context) error { + switch r.Cleanup { + case AnnotationValueRuntimeDockerCleanupOrphan: + r.log.Debug("Container left running", "container", rsp.ID, "image", r.Image) + return nil + case AnnotationValueRuntimeDockerCleanupStop: + if err := c.ContainerStop(ctx, rsp.ID, container.StopOptions{}); err != nil { + return errors.Wrap(err, "cannot stop Docker container") + } + case AnnotationValueRuntimeDockerCleanupRemove: + if err := c.ContainerStop(ctx, rsp.ID, container.StopOptions{}); err != nil { + return errors.Wrap(err, "cannot stop Docker container") + } + if err := c.ContainerRemove(ctx, rsp.ID, container.RemoveOptions{}); err != nil { + return errors.Wrap(err, "cannot remove Docker container") + } } + return nil } return RuntimeContext{Target: addr, Stop: stop}, nil diff --git a/cmd/crank/beta/render/runtime_docker_test.go b/cmd/crank/beta/render/runtime_docker_test.go index 467593369..5852d4d29 100644 --- a/cmd/crank/beta/render/runtime_docker_test.go +++ b/cmd/crank/beta/render/runtime_docker_test.go @@ -77,7 +77,7 @@ func TestGetRuntimeDocker(t *testing.T) { want: want{ rd: &RuntimeDocker{ Image: "test-image-from-annotation", - Stop: false, + Cleanup: AnnotationValueRuntimeDockerCleanupOrphan, PullPolicy: AnnotationValueRuntimeDockerPullPolicyAlways, }, }, @@ -99,7 +99,7 @@ func TestGetRuntimeDocker(t *testing.T) { want: want{ rd: &RuntimeDocker{ Image: "test-package", - Stop: true, + Cleanup: AnnotationValueRuntimeDockerCleanupRemove, PullPolicy: AnnotationValueRuntimeDockerPullPolicyIfNotPresent, }, }, @@ -144,6 +144,30 @@ func TestGetRuntimeDocker(t *testing.T) { err: cmpopts.AnyError, }, }, + "AnnotationsCleanupSetToStop": { + reason: "should return a RuntimeDocker with all fields set according to the supplied Function's annotations", + args: args{ + fn: v1beta1.Function{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + AnnotationKeyRuntimeDockerCleanup: string(AnnotationValueRuntimeDockerCleanupStop), + }, + }, + Spec: v1beta1.FunctionSpec{ + PackageSpec: v1.PackageSpec{ + Package: "test-package", + }, + }, + }, + }, + want: want{ + rd: &RuntimeDocker{ + Image: "test-package", + Cleanup: AnnotationValueRuntimeDockerCleanupStop, + PullPolicy: AnnotationValueRuntimeDockerPullPolicyIfNotPresent, + }, + }, + }, } for name, tc := range cases { 
t.Run(name, func(t *testing.T) { From ca6cbd0cef1351eaaadd63d80d0174c8e1e66272 Mon Sep 17 00:00:00 2001 From: gotwarlost Date: Wed, 22 May 2024 16:28:23 -0700 Subject: [PATCH 297/370] improve trace client performance using concurrent resource load fixes #5707 Add a loader with configurable concurrency to load resources in concurrent manner. The xrm client delegates to the loader for resource load and supports a functional option to set the concurrency. Add a `--concurrency` flag for the `crank beta trace` command and configure the xrm client appropriately. Signed-off-by: gotwarlost --- .../trace/internal/resource/xrm/client.go | 44 ++--- .../trace/internal/resource/xrm/loader.go | 129 +++++++++++++++ .../internal/resource/xrm/loader_test.go | 151 ++++++++++++++++++ cmd/crank/beta/trace/trace.go | 11 +- 4 files changed, 315 insertions(+), 20 deletions(-) create mode 100644 cmd/crank/beta/trace/internal/resource/xrm/loader.go create mode 100644 cmd/crank/beta/trace/internal/resource/xrm/loader_test.go diff --git a/cmd/crank/beta/trace/internal/resource/xrm/client.go b/cmd/crank/beta/trace/internal/resource/xrm/client.go index 336c80860..3bffc15d5 100644 --- a/cmd/crank/beta/trace/internal/resource/xrm/client.go +++ b/cmd/crank/beta/trace/internal/resource/xrm/client.go @@ -33,11 +33,14 @@ import ( "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" ) +const defaultConcurrency = 5 + // Client to get a Resource with all its children. type Client struct { getConnectionSecrets bool - client client.Client + client client.Client + concurrency int } // ResourceClientOption is a functional option for a Client. @@ -50,12 +53,20 @@ func WithConnectionSecrets(v bool) ResourceClientOption { } } +// WithConcurrency is a functional option that sets the concurrency for the resource load. +func WithConcurrency(n int) ResourceClientOption { + return func(c *Client) { + c.concurrency = n + } +} + // NewClient returns a new Client. func NewClient(in client.Client, opts ...ResourceClientOption) (*Client, error) { uClient := xpunstructured.NewClient(in) c := &Client{ - client: uClient, + client: uClient, + concurrency: defaultConcurrency, } for _, o := range opts { @@ -67,25 +78,20 @@ func NewClient(in client.Client, opts ...ResourceClientOption) (*Client, error) // GetResourceTree returns the requested Crossplane Resource and all its children. func (kc *Client) GetResourceTree(ctx context.Context, root *resource.Resource) (*resource.Resource, error) { - // Set up a FIFO queue to traverse the resource tree breadth first. - queue := []*resource.Resource{root} - - for len(queue) > 0 { - // Pop the first element from the queue. - res := queue[0] - queue = queue[1:] - - refs := getResourceChildrenRefs(res, kc.getConnectionSecrets) - - for i := range refs { - child := resource.GetResource(ctx, kc.client, &refs[i]) + q := newLoader(root, kc) + q.load(ctx, kc.concurrency) + return root, nil +} - res.Children = append(res.Children, child) - queue = append(queue, child) - } - } +// loadResource returns the resource for the specified object reference. +func (kc *Client) loadResource(ctx context.Context, ref *v1.ObjectReference) *resource.Resource { + return resource.GetResource(ctx, kc.client, ref) +} - return root, nil +// getResourceChildrenRefs returns the references to the children for the given +// Resource, assuming it's a Crossplane resource, XR or XRC. 
+func (kc *Client) getResourceChildrenRefs(_ context.Context, r *resource.Resource) []v1.ObjectReference { + return getResourceChildrenRefs(r, kc.getConnectionSecrets) } // getResourceChildrenRefs returns the references to the children for the given diff --git a/cmd/crank/beta/trace/internal/resource/xrm/loader.go b/cmd/crank/beta/trace/internal/resource/xrm/loader.go new file mode 100644 index 000000000..d6686f402 --- /dev/null +++ b/cmd/crank/beta/trace/internal/resource/xrm/loader.go @@ -0,0 +1,129 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package xrm + +import ( + "context" + "sync" + + v1 "k8s.io/api/core/v1" + + "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" +) + +// channelCapacity is the buffer size of the processing channel, should be a high value +// so that there is no blocking. Correctness of processing does not depend on the channel capacity. +var channelCapacity = 1000 //nolint:gochecknoglobals // we treat this as constant only overrideable for tests. + +// workItem maintains the relationship of a resource to be loaded with its parent +// such that the resource that is loaded can be added as a child. +type workItem struct { + parent *resource.Resource + child v1.ObjectReference +} + +// resourceLoader is a delegate that loads resources and returns child resource refs. +type resourceLoader interface { + loadResource(ctx context.Context, ref *v1.ObjectReference) *resource.Resource + getResourceChildrenRefs(_ context.Context, r *resource.Resource) []v1.ObjectReference +} + +// loader loads resources concurrently. +type loader struct { + root *resource.Resource // the root resource for which the tree is loaded + l resourceLoader // the resource loader + resourceLock sync.Mutex // lock when updating the children of any resource + processing sync.WaitGroup // "counter" to track requests in flight + ch chan workItem // processing channel + done chan struct{} // done channel, signaled when all resources are loaded +} + +// newLoader creates a loader for the root resource. +func newLoader(root *resource.Resource, rl resourceLoader) *loader { + l := &loader{ + l: rl, + ch: make(chan workItem, channelCapacity), + done: make(chan struct{}), + root: root, + } + return l +} + +// load loads the full resource tree in a concurrent manner. +func (l *loader) load(ctx context.Context, concurrency int) { + // make sure counters are incremented for root child refs before starting concurrent processing + refs := l.l.getResourceChildrenRefs(ctx, l.root) + l.addRefs(l.root, refs) + + // signal the done channel after all items are processed + go func() { + l.processing.Wait() + close(l.done) + }() + + if concurrency < 1 { + concurrency = defaultConcurrency + } + var wg sync.WaitGroup + for i := 0; i < concurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-l.done: + return + case item := <-l.ch: + l.processItem(ctx, item) + } + } + }() + } + wg.Wait() +} + +// addRefs adds work items to the queue. 
+func (l *loader) addRefs(parent *resource.Resource, refs []v1.ObjectReference) { + // ensure counters are updated synchronously + l.processing.Add(len(refs)) + // free up the current processing routine even if the channel blocks. + go func() { + for _, ref := range refs { + l.ch <- workItem{ + parent: parent, + child: ref, + } + } + }() +} + +// processItem processes a single work item in the queue and decrements the in-process counter +// after adding child references. +func (l *loader) processItem(ctx context.Context, item workItem) { + defer l.processing.Done() + res := l.l.loadResource(ctx, &item.child) + refs := l.l.getResourceChildrenRefs(ctx, res) + l.updateChild(item, res) + l.addRefs(res, refs) +} + +// updateChild adds the supplied child resource to its parent. +func (l *loader) updateChild(item workItem, res *resource.Resource) { + l.resourceLock.Lock() + item.parent.Children = append(item.parent.Children, res) + l.resourceLock.Unlock() +} diff --git a/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go b/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go new file mode 100644 index 000000000..a57e74318 --- /dev/null +++ b/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go @@ -0,0 +1,151 @@ +package xrm + +import ( + "context" + "fmt" + "math/rand" + "regexp" + "strconv" + "testing" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" +) + +var reNum = regexp.MustCompile(`-(\d+)$`) + +type simpleGenerator struct { + childDepth int + numItems int +} + +func (d *simpleGenerator) createResource(apiVersion, kind, name string) *resource.Resource { + obj := map[string]any{ + "apiVersion": apiVersion, + "kind": kind, + "metadata": map[string]any{ + "name": name, + }, + } + return &resource.Resource{Unstructured: unstructured.Unstructured{Object: obj}} +} + +func (d *simpleGenerator) createRefAtDepth(depth int) v1.ObjectReference { + prefix := "comp-res" + if depth == d.childDepth { + prefix = "managed-res" + } + return v1.ObjectReference{ + Kind: fmt.Sprintf("Depth%d", depth), + Name: fmt.Sprintf("%s-%d-%d", prefix, rand.Int(), depth), + APIVersion: "example.com/v1", + } +} + +func (d *simpleGenerator) createResourceFromRef(ref *v1.ObjectReference) *resource.Resource { + return d.createResource(ref.APIVersion, ref.Kind, ref.Name) +} + +func (d *simpleGenerator) loadResource(_ context.Context, ref *v1.ObjectReference) *resource.Resource { + return d.createResourceFromRef(ref) +} + +func (d *simpleGenerator) depthFromResource(res *resource.Resource) int { + ret := 0 + matches := reNum.FindStringSubmatch(res.Unstructured.GetName()) + if len(matches) > 0 { + n, err := strconv.Atoi(matches[1]) + if err != nil { + panic(err) + } + ret = n + } + return ret +} + +func (d *simpleGenerator) getResourceChildrenRefs(_ context.Context, r *resource.Resource) []v1.ObjectReference { + depth := d.depthFromResource(r) + if depth == d.childDepth { + return nil + } + var ret []v1.ObjectReference + for i := 0; i < d.numItems; i++ { + ret = append(ret, d.createRefAtDepth(depth+1)) + } + return ret +} + +var _ resourceLoader = &simpleGenerator{} + +func countItems(root *resource.Resource) int { + ret := 1 + for _, child := range root.Children { + ret += countItems(child) + } + return ret +} + +func TestLoader(t *testing.T) { + tests := []struct { + name string + childDepth int + numItems int + channelCapacity int + concurrency int + expectedResources int + }{ + { + name: "simple", + 
childDepth: 3, + numItems: 3, + expectedResources: 1 + 3 + 9 + 27, + }, + { + name: "blocking buffer", + channelCapacity: 1, + concurrency: 1, + childDepth: 3, + numItems: 10, + expectedResources: 1 + 10 + 100 + 1000, + }, + { + name: "no children at root", + childDepth: 0, + numItems: 0, + expectedResources: 1, + }, + { + name: "uses default concurrency", + concurrency: -1, + childDepth: 3, + numItems: 3, + expectedResources: 1 + 3 + 9 + 27, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + orig := channelCapacity + defer func() { channelCapacity = orig }() + + if test.channelCapacity > 0 { + channelCapacity = test.channelCapacity + } + concurrency := defaultConcurrency + if test.concurrency != 0 { + concurrency = test.concurrency + } + sg := &simpleGenerator{childDepth: test.childDepth, numItems: test.numItems} + rootRef := sg.createRefAtDepth(0) + root := sg.createResourceFromRef(&rootRef) + l := newLoader(root, sg) + l.load(context.Background(), concurrency) + n := countItems(root) + if test.expectedResources != n { + t.Errorf("resource count mismatch: want %d, got %d", test.expectedResources, n) + } + }) + } +} diff --git a/cmd/crank/beta/trace/trace.go b/cmd/crank/beta/trace/trace.go index 98704c9c0..f0f6138dc 100644 --- a/cmd/crank/beta/trace/trace.go +++ b/cmd/crank/beta/trace/trace.go @@ -69,6 +69,7 @@ type Cmd struct { ShowPackageDependencies string `default:"unique" enum:"unique,all,none" help:"Show package dependencies in the output. One of: unique, all, none." name:"show-package-dependencies"` ShowPackageRevisions string `default:"active" enum:"active,all,none" help:"Show package revisions in the output. One of: active, all, none." name:"show-package-revisions"` ShowPackageRuntimeConfigs bool `default:"false" help:"Show package runtime configs in the output." name:"show-package-runtime-configs"` + Concurrency int `default:"5" help:"load concurrency" name:"concurrency"` } // Help returns help message for the trace command. 
@@ -139,6 +140,11 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { logger.Debug("Found kubeconfig") + // XXX: this needs to be made configurable - see TODO on line 64 + // I used the values below for checking timing as concurrency increases + // kubeconfig.QPS = 50 + // kubeconfig.Burst = 100 + client, err := client.New(kubeconfig, client.Options{ Scheme: scheme.Scheme, }) @@ -208,7 +214,10 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { } default: logger.Debug("Requested resource is not a package, assumed to be an XR, XRC or MR") - treeClient, err = xrm.NewClient(client, xrm.WithConnectionSecrets(c.ShowConnectionSecrets)) + treeClient, err = xrm.NewClient(client, + xrm.WithConnectionSecrets(c.ShowConnectionSecrets), + xrm.WithConcurrency(c.Concurrency), + ) if err != nil { return errors.Wrap(err, errInitKubeClient) } From 31160f9c8b93dfb1cd2e6ac9d1fcb8c042d9fc4d Mon Sep 17 00:00:00 2001 From: gotwarlost Date: Mon, 27 May 2024 11:01:00 -0700 Subject: [PATCH 298/370] sort children, set default QPS and burst, update test structure * sorts children after concurrent load using the same algorithm that crossplane uses to sort resource refs * sets default QPS and burst when not set to the same values as a recent PR * updates test structure to match existing tests Signed-off-by: gotwarlost --- .../trace/internal/resource/xrm/loader.go | 13 +++ .../internal/resource/xrm/loader_test.go | 98 ++++++++++++------- cmd/crank/beta/trace/trace.go | 10 +- 3 files changed, 79 insertions(+), 42 deletions(-) diff --git a/cmd/crank/beta/trace/internal/resource/xrm/loader.go b/cmd/crank/beta/trace/internal/resource/xrm/loader.go index d6686f402..19a07a892 100644 --- a/cmd/crank/beta/trace/internal/resource/xrm/loader.go +++ b/cmd/crank/beta/trace/internal/resource/xrm/loader.go @@ -18,6 +18,7 @@ package xrm import ( "context" + "sort" "sync" v1 "k8s.io/api/core/v1" @@ -94,6 +95,18 @@ func (l *loader) load(ctx context.Context, concurrency int) { }() } wg.Wait() + sortRefs(l.root) +} + +func sortRefs(root *resource.Resource) { + for _, child := range root.Children { + sortRefs(child) + } + sort.Slice(root.Children, func(i, j int) bool { + l := root.Children[i].Unstructured + r := root.Children[j].Unstructured + return l.GetAPIVersion()+l.GetKind()+l.GetName() < r.GetAPIVersion()+r.GetKind()+r.GetName() + }) } // addRefs adds work items to the queue. 
diff --git a/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go b/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go index a57e74318..0ad8a8bdf 100644 --- a/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go +++ b/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go @@ -88,63 +88,85 @@ func countItems(root *resource.Resource) int { } func TestLoader(t *testing.T) { - tests := []struct { - name string - childDepth int - numItems int - channelCapacity int - concurrency int + type want struct { expectedResources int + } + type args struct { + childDepth int + numItems int + channelCapacity int + concurrency int + } + tests := map[string]struct { + reason string + args args + want want }{ - { - name: "simple", - childDepth: 3, - numItems: 3, - expectedResources: 1 + 3 + 9 + 27, + "Basic": { + reason: "simple test with default concurrency", + args: args{ + childDepth: 3, + numItems: 3, + }, + want: want{ + expectedResources: 1 + 3 + 9 + 27, + }, }, - { - name: "blocking buffer", - channelCapacity: 1, - concurrency: 1, - childDepth: 3, - numItems: 10, - expectedResources: 1 + 10 + 100 + 1000, + "BlockingBuffer": { + reason: "in-process resources greater than channel buffer, causing blocking", + args: args{ + channelCapacity: 1, + concurrency: 1, + childDepth: 3, + numItems: 10, + }, + want: want{ + expectedResources: 1 + 10 + 100 + 1000, + }, }, - { - name: "no children at root", - childDepth: 0, - numItems: 0, - expectedResources: 1, + "NoRootChildren": { + reason: "top-level resource has no children", + args: args{ + childDepth: 0, + numItems: 0, + }, + want: want{ + expectedResources: 1, + }, }, - { - name: "uses default concurrency", - concurrency: -1, - childDepth: 3, - numItems: 3, - expectedResources: 1 + 3 + 9 + 27, + "BadConcurrency": { + reason: "invalid concurrency is adjusted to be valid", + args: args{ + concurrency: -1, + childDepth: 3, + numItems: 3, + }, + want: want{ + expectedResources: 1 + 3 + 9 + 27, + }, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + for name, test := range tests { + t.Run(name, func(t *testing.T) { orig := channelCapacity defer func() { channelCapacity = orig }() - if test.channelCapacity > 0 { - channelCapacity = test.channelCapacity + if test.args.channelCapacity > 0 { + channelCapacity = test.args.channelCapacity } concurrency := defaultConcurrency - if test.concurrency != 0 { - concurrency = test.concurrency + if test.args.concurrency != 0 { + concurrency = test.args.concurrency } - sg := &simpleGenerator{childDepth: test.childDepth, numItems: test.numItems} + sg := &simpleGenerator{childDepth: test.args.childDepth, numItems: test.args.numItems} rootRef := sg.createRefAtDepth(0) root := sg.createResourceFromRef(&rootRef) l := newLoader(root, sg) l.load(context.Background(), concurrency) n := countItems(root) - if test.expectedResources != n { - t.Errorf("resource count mismatch: want %d, got %d", test.expectedResources, n) + if test.want.expectedResources != n { + t.Errorf("resource count mismatch: want %d, got %d", test.want.expectedResources, n) } }) } diff --git a/cmd/crank/beta/trace/trace.go b/cmd/crank/beta/trace/trace.go index f0f6138dc..50e6229a4 100644 --- a/cmd/crank/beta/trace/trace.go +++ b/cmd/crank/beta/trace/trace.go @@ -140,10 +140,12 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { logger.Debug("Found kubeconfig") - // XXX: this needs to be made configurable - see TODO on line 64 - // I used the values below for checking timing as concurrency increases - // 
kubeconfig.QPS = 50 - // kubeconfig.Burst = 100 + if kubeconfig.QPS == 0 { + kubeconfig.QPS = 20 + } + if kubeconfig.Burst == 0 { + kubeconfig.Burst = 30 + } client, err := client.New(kubeconfig, client.Options{ Scheme: scheme.Scheme, From 514727415ad51148599e9ff7f2ce4d95a4af7f79 Mon Sep 17 00:00:00 2001 From: gotwarlost Date: Mon, 27 May 2024 11:07:48 -0700 Subject: [PATCH 299/370] remove duplicate QPS/ burst setting Signed-off-by: gotwarlost --- cmd/crank/beta/trace/trace.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/cmd/crank/beta/trace/trace.go b/cmd/crank/beta/trace/trace.go index 50e6229a4..924c05934 100644 --- a/cmd/crank/beta/trace/trace.go +++ b/cmd/crank/beta/trace/trace.go @@ -140,13 +140,6 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { logger.Debug("Found kubeconfig") - if kubeconfig.QPS == 0 { - kubeconfig.QPS = 20 - } - if kubeconfig.Burst == 0 { - kubeconfig.Burst = 30 - } - client, err := client.New(kubeconfig, client.Options{ Scheme: scheme.Scheme, }) From df75bcaaae93e7a956d44f0c62fe3a7982b70f19 Mon Sep 17 00:00:00 2001 From: gotwarlost Date: Mon, 24 Jun 2024 12:44:52 -0700 Subject: [PATCH 300/370] resolve PR comments * remove context from the loader interface when not needed * change member name from l to rl * simplify tests to lose regexes * add concurrency docs * remove module global for channel capacity, pass in as parameter * use range over ints instead of for loops since linter complains otherwise * do not run goroutines when there are no child refs to add * add more docs for sorting logic and why it is needed Signed-off-by: gotwarlost --- .../trace/internal/resource/xrm/client.go | 5 +- .../trace/internal/resource/xrm/loader.go | 30 +++++++---- .../internal/resource/xrm/loader_test.go | 54 +++++++++++-------- 3 files changed, 53 insertions(+), 36 deletions(-) diff --git a/cmd/crank/beta/trace/internal/resource/xrm/client.go b/cmd/crank/beta/trace/internal/resource/xrm/client.go index 3bffc15d5..b143d2724 100644 --- a/cmd/crank/beta/trace/internal/resource/xrm/client.go +++ b/cmd/crank/beta/trace/internal/resource/xrm/client.go @@ -33,6 +33,7 @@ import ( "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" ) +// defaultConcurrency is the concurrency using which the resource tree if loaded when not explicitly specified. const defaultConcurrency = 5 // Client to get a Resource with all its children. @@ -78,7 +79,7 @@ func NewClient(in client.Client, opts ...ResourceClientOption) (*Client, error) // GetResourceTree returns the requested Crossplane Resource and all its children. func (kc *Client) GetResourceTree(ctx context.Context, root *resource.Resource) (*resource.Resource, error) { - q := newLoader(root, kc) + q := newLoader(root, kc, defaultChannelCapacity) q.load(ctx, kc.concurrency) return root, nil } @@ -90,7 +91,7 @@ func (kc *Client) loadResource(ctx context.Context, ref *v1.ObjectReference) *re // getResourceChildrenRefs returns the references to the children for the given // Resource, assuming it's a Crossplane resource, XR or XRC. 
-func (kc *Client) getResourceChildrenRefs(_ context.Context, r *resource.Resource) []v1.ObjectReference { +func (kc *Client) getResourceChildrenRefs(r *resource.Resource) []v1.ObjectReference { return getResourceChildrenRefs(r, kc.getConnectionSecrets) } diff --git a/cmd/crank/beta/trace/internal/resource/xrm/loader.go b/cmd/crank/beta/trace/internal/resource/xrm/loader.go index 19a07a892..5580faa5d 100644 --- a/cmd/crank/beta/trace/internal/resource/xrm/loader.go +++ b/cmd/crank/beta/trace/internal/resource/xrm/loader.go @@ -26,9 +26,9 @@ import ( "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" ) -// channelCapacity is the buffer size of the processing channel, should be a high value +// defaultChannelCapacity is the buffer size of the processing channel, should be a high value // so that there is no blocking. Correctness of processing does not depend on the channel capacity. -var channelCapacity = 1000 //nolint:gochecknoglobals // we treat this as constant only overrideable for tests. +const defaultChannelCapacity = 1000 // workItem maintains the relationship of a resource to be loaded with its parent // such that the resource that is loaded can be added as a child. @@ -40,13 +40,13 @@ type workItem struct { // resourceLoader is a delegate that loads resources and returns child resource refs. type resourceLoader interface { loadResource(ctx context.Context, ref *v1.ObjectReference) *resource.Resource - getResourceChildrenRefs(_ context.Context, r *resource.Resource) []v1.ObjectReference + getResourceChildrenRefs(r *resource.Resource) []v1.ObjectReference } // loader loads resources concurrently. type loader struct { root *resource.Resource // the root resource for which the tree is loaded - l resourceLoader // the resource loader + rl resourceLoader // the resource loader resourceLock sync.Mutex // lock when updating the children of any resource processing sync.WaitGroup // "counter" to track requests in flight ch chan workItem // processing channel @@ -54,9 +54,9 @@ type loader struct { } // newLoader creates a loader for the root resource. -func newLoader(root *resource.Resource, rl resourceLoader) *loader { +func newLoader(root *resource.Resource, rl resourceLoader, channelCapacity int) *loader { l := &loader{ - l: rl, + rl: rl, ch: make(chan workItem, channelCapacity), done: make(chan struct{}), root: root, @@ -67,7 +67,7 @@ func newLoader(root *resource.Resource, rl resourceLoader) *loader { // load loads the full resource tree in a concurrent manner. func (l *loader) load(ctx context.Context, concurrency int) { // make sure counters are incremented for root child refs before starting concurrent processing - refs := l.l.getResourceChildrenRefs(ctx, l.root) + refs := l.rl.getResourceChildrenRefs(l.root) l.addRefs(l.root, refs) // signal the done channel after all items are processed @@ -80,8 +80,9 @@ func (l *loader) load(ctx context.Context, concurrency int) { concurrency = defaultConcurrency } var wg sync.WaitGroup - for i := 0; i < concurrency; i++ { + for range concurrency { wg.Add(1) + // spin up a worker that processes items from the channel until the done channel is signaled. go func() { defer wg.Done() for { @@ -95,6 +96,8 @@ func (l *loader) load(ctx context.Context, concurrency int) { }() } wg.Wait() + // order of children loaded for resources is not deterministic because of concurrent processing. + // Sort children explicitly to make this so. 
sortRefs(l.root) } @@ -102,6 +105,7 @@ func sortRefs(root *resource.Resource) { for _, child := range root.Children { sortRefs(child) } + // this duplicates the sorting logic from internal/controller/apiextensions/composite/composition_functions.go sort.Slice(root.Children, func(i, j int) bool { l := root.Children[i].Unstructured r := root.Children[j].Unstructured @@ -111,9 +115,13 @@ func sortRefs(root *resource.Resource) { // addRefs adds work items to the queue. func (l *loader) addRefs(parent *resource.Resource, refs []v1.ObjectReference) { + // only perform work and spin up a goroutine if references are present. + if len(refs) == 0 { + return + } // ensure counters are updated synchronously l.processing.Add(len(refs)) - // free up the current processing routine even if the channel blocks. + // free up the current processing routine even if the channel would block. go func() { for _, ref := range refs { l.ch <- workItem{ @@ -128,8 +136,8 @@ func (l *loader) addRefs(parent *resource.Resource, refs []v1.ObjectReference) { // after adding child references. func (l *loader) processItem(ctx context.Context, item workItem) { defer l.processing.Done() - res := l.l.loadResource(ctx, &item.child) - refs := l.l.getResourceChildrenRefs(ctx, res) + res := l.rl.loadResource(ctx, &item.child) + refs := l.rl.getResourceChildrenRefs(res) l.updateChild(item, res) l.addRefs(res, refs) } diff --git a/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go b/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go index 0ad8a8bdf..da2ced64d 100644 --- a/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go +++ b/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go @@ -4,8 +4,7 @@ import ( "context" "fmt" "math/rand" - "regexp" - "strconv" + "sync" "testing" v1 "k8s.io/api/core/v1" @@ -14,11 +13,21 @@ import ( "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" ) -var reNum = regexp.MustCompile(`-(\d+)$`) - +// simpleGenerator generates a tree of resources for a specific depth and the number of children to +// create at any level. type simpleGenerator struct { childDepth int numItems int + l sync.Mutex // lock for accessing the depth map + depthMap map[string]int // tracks resource names and their depth so that we can stop when the desired depth is reached. 
+} + +func newSimpleGenerator(childDepth, numItems int) *simpleGenerator { + return &simpleGenerator{ + childDepth: childDepth, + numItems: numItems, + depthMap: map[string]int{}, + } } func (d *simpleGenerator) createResource(apiVersion, kind, name string) *resource.Resource { @@ -32,14 +41,22 @@ func (d *simpleGenerator) createResource(apiVersion, kind, name string) *resourc return &resource.Resource{Unstructured: unstructured.Unstructured{Object: obj}} } +func (d *simpleGenerator) trackResourceDepth(name string, depth int) { + d.l.Lock() + defer d.l.Unlock() + d.depthMap[name] = depth +} + func (d *simpleGenerator) createRefAtDepth(depth int) v1.ObjectReference { prefix := "comp-res" if depth == d.childDepth { prefix = "managed-res" } + name := fmt.Sprintf("%s-%d-%d", prefix, rand.Int(), depth) + d.trackResourceDepth(name, depth) return v1.ObjectReference{ Kind: fmt.Sprintf("Depth%d", depth), - Name: fmt.Sprintf("%s-%d-%d", prefix, rand.Int(), depth), + Name: name, APIVersion: "example.com/v1", } } @@ -53,25 +70,18 @@ func (d *simpleGenerator) loadResource(_ context.Context, ref *v1.ObjectReferenc } func (d *simpleGenerator) depthFromResource(res *resource.Resource) int { - ret := 0 - matches := reNum.FindStringSubmatch(res.Unstructured.GetName()) - if len(matches) > 0 { - n, err := strconv.Atoi(matches[1]) - if err != nil { - panic(err) - } - ret = n - } - return ret + d.l.Lock() + defer d.l.Unlock() + return d.depthMap[res.Unstructured.GetName()] } -func (d *simpleGenerator) getResourceChildrenRefs(_ context.Context, r *resource.Resource) []v1.ObjectReference { +func (d *simpleGenerator) getResourceChildrenRefs(r *resource.Resource) []v1.ObjectReference { depth := d.depthFromResource(r) if depth == d.childDepth { return nil } - var ret []v1.ObjectReference - for i := 0; i < d.numItems; i++ { + ret := make([]v1.ObjectReference, 0, d.numItems) + for range d.numItems { ret = append(ret, d.createRefAtDepth(depth+1)) } return ret @@ -149,9 +159,7 @@ func TestLoader(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - orig := channelCapacity - defer func() { channelCapacity = orig }() - + channelCapacity := defaultChannelCapacity if test.args.channelCapacity > 0 { channelCapacity = test.args.channelCapacity } @@ -159,10 +167,10 @@ func TestLoader(t *testing.T) { if test.args.concurrency != 0 { concurrency = test.args.concurrency } - sg := &simpleGenerator{childDepth: test.args.childDepth, numItems: test.args.numItems} + sg := newSimpleGenerator(test.args.childDepth, test.args.numItems) rootRef := sg.createRefAtDepth(0) root := sg.createResourceFromRef(&rootRef) - l := newLoader(root, sg) + l := newLoader(root, sg, channelCapacity) l.load(context.Background(), concurrency) n := countItems(root) if test.want.expectedResources != n { From e2b3b2451a3c1cc9e3e0689963e08ee5554a9628 Mon Sep 17 00:00:00 2001 From: Blake R <85771645+blakeromano@users.noreply.github.com> Date: Wed, 26 Jun 2024 13:31:28 -0700 Subject: [PATCH 301/370] maybe earthly reviewable fix Signed-off-by: Blake R <85771645+blakeromano@users.noreply.github.com> --- cluster/charts/crossplane/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 9586a3434..8213ec695 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -125,7 +125,7 @@ and their default values. | `service.customAnnotations` | Configure annotations on the service object. 
Only enabled when webhooks.enabled = true | `{}` | | `serviceAccount.customAnnotations` | Add custom `annotations` to the Crossplane ServiceAccount. | `{}` | | `tolerations` | Add `tolerations` to the Crossplane pod deployment. | `[]` | -| `topologySpreadConstraints` | Add `topologySpreadConstraints` to the Crossplane pod deployment. | `[]` | +| `topologySpreadConstraints` | Add `topologySpreadConstraints` to the Crossplane pod deployment. | `[]` | | `webhooks.enabled` | Enable webhooks for Crossplane and installed Provider packages. | `true` | ### Command Line From 0b75611324407889a6064a17cb9058a6f40c2c36 Mon Sep 17 00:00:00 2001 From: dalton hill Date: Mon, 3 Jun 2024 11:36:15 -0500 Subject: [PATCH 302/370] Composition Function status conditions and claim events. Signed-off-by: dalton hill --- .../fn/proto/v1beta1/run_function.pb.go | 637 +++++++++++++----- .../fn/proto/v1beta1/run_function.proto | 63 +- cmd/crank/beta/validate/validate_test.go | 27 + go.mod | 8 +- go.sum | 12 +- .../apiextensions/claim/reconciler.go | 6 + .../apiextensions/claim/reconciler_test.go | 87 +++ .../composite/composition_functions.go | 62 +- .../composite/composition_functions_test.go | 233 ++++++- .../apiextensions/composite/composition_pt.go | 22 +- .../composite/composition_pt_test.go | 7 +- .../apiextensions/composite/reconciler.go | 152 ++++- .../composite/reconciler_test.go | 636 ++++++++++++++++- .../pkg/revision/establisher_test.go | 4 +- internal/xcrd/crd_test.go | 72 ++ internal/xcrd/schemas.go | 9 + 16 files changed, 1800 insertions(+), 237 deletions(-) diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go index ef5f1a8de..20a9660b6 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go @@ -150,6 +150,113 @@ func (Severity) EnumDescriptor() ([]byte, []int) { return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{1} } +// Target of Function results and conditions. +type Target int32 + +const ( + // If the target is unspecified, the result targets the composite resource. + Target_TARGET_UNSPECIFIED Target = 0 + // Target the composite resource. Results that target the composite resource + // should include detailed, advanced information. + Target_TARGET_COMPOSITE Target = 1 + // Target the composite and the claim. Results that target the composite and + // the claim should include only end-user friendly information. + Target_TARGET_COMPOSITE_AND_CLAIM Target = 2 +) + +// Enum value maps for Target. +var ( + Target_name = map[int32]string{ + 0: "TARGET_UNSPECIFIED", + 1: "TARGET_COMPOSITE", + 2: "TARGET_COMPOSITE_AND_CLAIM", + } + Target_value = map[string]int32{ + "TARGET_UNSPECIFIED": 0, + "TARGET_COMPOSITE": 1, + "TARGET_COMPOSITE_AND_CLAIM": 2, + } +) + +func (x Target) Enum() *Target { + p := new(Target) + *p = x + return p +} + +func (x Target) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Target) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[2].Descriptor() +} + +func (Target) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[2] +} + +func (x Target) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Target.Descriptor instead. 
+func (Target) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{2} +} + +type Status int32 + +const ( + Status_STATUS_CONDITION_UNSPECIFIED Status = 0 + Status_STATUS_CONDITION_UNKNOWN Status = 1 + Status_STATUS_CONDITION_TRUE Status = 2 + Status_STATUS_CONDITION_FALSE Status = 3 +) + +// Enum value maps for Status. +var ( + Status_name = map[int32]string{ + 0: "STATUS_CONDITION_UNSPECIFIED", + 1: "STATUS_CONDITION_UNKNOWN", + 2: "STATUS_CONDITION_TRUE", + 3: "STATUS_CONDITION_FALSE", + } + Status_value = map[string]int32{ + "STATUS_CONDITION_UNSPECIFIED": 0, + "STATUS_CONDITION_UNKNOWN": 1, + "STATUS_CONDITION_TRUE": 2, + "STATUS_CONDITION_FALSE": 3, + } +) + +func (x Status) Enum() *Status { + p := new(Status) + *p = x + return p +} + +func (x Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Status) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[3].Descriptor() +} + +func (Status) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[3] +} + +func (x Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status.Descriptor instead. +func (Status) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{3} +} + // A RunFunctionRequest requests that the Composition Function be run. type RunFunctionRequest struct { state protoimpl.MessageState @@ -468,6 +575,9 @@ type RunFunctionResponse struct { Context *structpb.Struct `protobuf:"bytes,4,opt,name=context,proto3,oneof" json:"context,omitempty"` // Requirements that must be satisfied for this Function to run successfully. Requirements *Requirements `protobuf:"bytes,5,opt,name=requirements,proto3" json:"requirements,omitempty"` + // Status conditions to be applied to the composite resource. Conditions may also + // optionally be applied to the composite resource's associated claim. + Conditions []*Condition `protobuf:"bytes,6,rep,name=conditions,proto3" json:"conditions,omitempty"` } func (x *RunFunctionResponse) Reset() { @@ -537,6 +647,13 @@ func (x *RunFunctionResponse) GetRequirements() *Requirements { return nil } +func (x *RunFunctionResponse) GetConditions() []*Condition { + if x != nil { + return x.Conditions + } + return nil +} + // RequestMeta contains metadata pertaining to a RunFunctionRequest. type RequestMeta struct { state protoimpl.MessageState @@ -1017,6 +1134,11 @@ type Result struct { Severity Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=apiextensions.fn.proto.v1beta1.Severity" json:"severity,omitempty"` // Human-readable details about the result. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Optional PascalCase, machine-readable reason for this result. If omitted, + // the value will be ComposeResources. + Reason *string `protobuf:"bytes,3,opt,name=reason,proto3,oneof" json:"reason,omitempty"` + // The resources this result targets. 
+ Target *Target `protobuf:"varint,4,opt,name=target,proto3,enum=apiextensions.fn.proto.v1beta1.Target,oneof" json:"target,omitempty"` } func (x *Result) Reset() { @@ -1065,6 +1187,113 @@ func (x *Result) GetMessage() string { return "" } +func (x *Result) GetReason() string { + if x != nil && x.Reason != nil { + return *x.Reason + } + return "" +} + +func (x *Result) GetTarget() Target { + if x != nil && x.Target != nil { + return *x.Target + } + return Target_TARGET_UNSPECIFIED +} + +// Status condition to be applied to the composite resource. Condition may also +// optionally be applied to the composite resource's associated claim. For +// detailed information on proper usage of status conditions, please see +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties. +type Condition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of condition in PascalCase. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Status of the condition. + Status Status `protobuf:"varint,2,opt,name=status,proto3,enum=apiextensions.fn.proto.v1beta1.Status" json:"status,omitempty"` + // Reason contains a programmatic identifier indicating the reason for the + // condition's last transition. Producers of specific condition types may + // define expected values and meanings for this field, and whether the values + // are considered a guaranteed API. The value should be a PascalCase string. + // This field may not be empty. + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // Message is a human readable message indicating details about the + // transition. This may be an empty string. + Message *string `protobuf:"bytes,4,opt,name=message,proto3,oneof" json:"message,omitempty"` + // The resources this condition targets. + Target *Target `protobuf:"varint,5,opt,name=target,proto3,enum=apiextensions.fn.proto.v1beta1.Target,oneof" json:"target,omitempty"` +} + +func (x *Condition) Reset() { + *x = Condition{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Condition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Condition) ProtoMessage() {} + +func (x *Condition) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Condition.ProtoReflect.Descriptor instead. 
+func (*Condition) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{13} +} + +func (x *Condition) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Condition) GetStatus() Status { + if x != nil { + return x.Status + } + return Status_STATUS_CONDITION_UNSPECIFIED +} + +func (x *Condition) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *Condition) GetMessage() string { + if x != nil && x.Message != nil { + return *x.Message + } + return "" +} + +func (x *Condition) GetTarget() Target { + if x != nil && x.Target != nil { + return *x.Target + } + return Target_TARGET_UNSPECIFIED +} + var File_apiextensions_fn_proto_v1beta1_run_function_proto protoreflect.FileDescriptor var file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = []byte{ @@ -1147,7 +1376,7 @@ var file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = []byte{ 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xf0, 0x02, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xbb, 0x03, 0x0a, 0x13, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, @@ -1170,120 +1399,161 @@ var file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = []byte{ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x22, 0x1f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, - 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, - 0x67, 0x22, 0xee, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x69, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x61, 0x70, - 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, - 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x73, 0x0a, - 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x30, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, - 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0xc3, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, - 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0a, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, - 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x49, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, + 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x1f, 0x0a, 0x0b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, + 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x22, 0xee, 0x01, 0x0a, + 0x0c, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x69, 0x0a, + 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x72, 0x61, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x73, 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, + 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc3, 0x01, + 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x4f, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x42, - 0x07, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4f, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x5a, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, + 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, + 0x67, 0x12, 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x74, 0x74, 0x6c, 0x22, 0x8b, 0x02, 0x0a, 0x05, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 
0x74, 0x72, - 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5a, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, - 0x52, 0x03, 0x74, 0x74, 0x6c, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x74, 0x74, 0x6c, - 0x22, 0x8b, 0x02, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x63, 0x6f, - 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x65, 0x12, 0x52, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, - 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x66, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, - 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x6e, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x61, - 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 
0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x12, 0x3b, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, - 0x2e, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x1a, 0x44, 0x0a, - 0x16, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, - 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x68, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x44, 0x0a, - 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, - 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2a, 0x3f, 0x0a, - 0x05, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, - 0x0a, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x01, 0x12, 0x0f, 0x0a, - 0x0b, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x02, 0x2a, 0x63, - 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, - 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, - 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, 0x56, 0x45, - 0x52, 0x49, 0x54, 0x59, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, - 0x0a, 0x0f, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, - 0x4c, 0x10, 0x03, 0x32, 0x91, 0x01, 0x0a, 0x15, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x78, 0x0a, - 0x0b, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x61, - 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, - 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x33, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x12, 0x52, 0x0a, + 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 
0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, - 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, - 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x1a, 0x66, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x6e, 0x0a, 0x12, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x05, 0x72, + 0x65, 0x61, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, + 0x79, 0x52, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe0, + 0x01, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x44, 0x0a, 0x08, 0x73, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x61, 0x70, + 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x43, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x01, + 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, + 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x22, 0xf2, 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x43, 0x0a, 0x06, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x48, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, + 0x0a, 0x0a, 0x08, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2a, 0x3f, 0x0a, 0x05, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, + 0x15, 0x0a, 0x11, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, + 0x54, 0x52, 0x55, 0x45, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, + 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x02, 0x2a, 0x63, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 
0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, + 0x0e, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, + 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x57, 0x41, + 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x56, 0x45, 0x52, + 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x03, 0x2a, 0x56, 0x0a, 0x06, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, + 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x53, 0x49, + 0x54, 0x45, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x43, + 0x4f, 0x4d, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x41, + 0x49, 0x4d, 0x10, 0x02, 0x2a, 0x7f, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x20, + 0x0a, 0x1c, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x19, + 0x0a, 0x15, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, + 0x4c, 0x53, 0x45, 0x10, 0x03, 0x32, 0x91, 0x01, 0x0a, 0x15, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x78, 0x0a, 0x0b, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, + 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, + 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x66, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1298,71 +1568,78 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP() []byte return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData } -var file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var 
file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 21) var file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes = []interface{}{ (Ready)(0), // 0: apiextensions.fn.proto.v1beta1.Ready (Severity)(0), // 1: apiextensions.fn.proto.v1beta1.Severity - (*RunFunctionRequest)(nil), // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest - (*Credentials)(nil), // 3: apiextensions.fn.proto.v1beta1.Credentials - (*CredentialData)(nil), // 4: apiextensions.fn.proto.v1beta1.CredentialData - (*Resources)(nil), // 5: apiextensions.fn.proto.v1beta1.Resources - (*RunFunctionResponse)(nil), // 6: apiextensions.fn.proto.v1beta1.RunFunctionResponse - (*RequestMeta)(nil), // 7: apiextensions.fn.proto.v1beta1.RequestMeta - (*Requirements)(nil), // 8: apiextensions.fn.proto.v1beta1.Requirements - (*ResourceSelector)(nil), // 9: apiextensions.fn.proto.v1beta1.ResourceSelector - (*MatchLabels)(nil), // 10: apiextensions.fn.proto.v1beta1.MatchLabels - (*ResponseMeta)(nil), // 11: apiextensions.fn.proto.v1beta1.ResponseMeta - (*State)(nil), // 12: apiextensions.fn.proto.v1beta1.State - (*Resource)(nil), // 13: apiextensions.fn.proto.v1beta1.Resource - (*Result)(nil), // 14: apiextensions.fn.proto.v1beta1.Result - nil, // 15: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry - nil, // 16: apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry - nil, // 17: apiextensions.fn.proto.v1beta1.CredentialData.DataEntry - nil, // 18: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry - nil, // 19: apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry - nil, // 20: apiextensions.fn.proto.v1beta1.State.ResourcesEntry - nil, // 21: apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry - (*structpb.Struct)(nil), // 22: google.protobuf.Struct - (*durationpb.Duration)(nil), // 23: google.protobuf.Duration + (Target)(0), // 2: apiextensions.fn.proto.v1beta1.Target + (Status)(0), // 3: apiextensions.fn.proto.v1beta1.Status + (*RunFunctionRequest)(nil), // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest + (*Credentials)(nil), // 5: apiextensions.fn.proto.v1beta1.Credentials + (*CredentialData)(nil), // 6: apiextensions.fn.proto.v1beta1.CredentialData + (*Resources)(nil), // 7: apiextensions.fn.proto.v1beta1.Resources + (*RunFunctionResponse)(nil), // 8: apiextensions.fn.proto.v1beta1.RunFunctionResponse + (*RequestMeta)(nil), // 9: apiextensions.fn.proto.v1beta1.RequestMeta + (*Requirements)(nil), // 10: apiextensions.fn.proto.v1beta1.Requirements + (*ResourceSelector)(nil), // 11: apiextensions.fn.proto.v1beta1.ResourceSelector + (*MatchLabels)(nil), // 12: apiextensions.fn.proto.v1beta1.MatchLabels + (*ResponseMeta)(nil), // 13: apiextensions.fn.proto.v1beta1.ResponseMeta + (*State)(nil), // 14: apiextensions.fn.proto.v1beta1.State + (*Resource)(nil), // 15: apiextensions.fn.proto.v1beta1.Resource + (*Result)(nil), // 16: apiextensions.fn.proto.v1beta1.Result + (*Condition)(nil), // 17: apiextensions.fn.proto.v1beta1.Condition + nil, // 18: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry + nil, // 19: apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry + nil, // 20: apiextensions.fn.proto.v1beta1.CredentialData.DataEntry + nil, // 21: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry + nil, // 22: apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry + nil, // 23: 
apiextensions.fn.proto.v1beta1.State.ResourcesEntry + nil, // 24: apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + (*structpb.Struct)(nil), // 25: google.protobuf.Struct + (*durationpb.Duration)(nil), // 26: google.protobuf.Duration } var file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs = []int32{ - 7, // 0: apiextensions.fn.proto.v1beta1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1beta1.RequestMeta - 12, // 1: apiextensions.fn.proto.v1beta1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1beta1.State - 12, // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1beta1.State - 22, // 3: apiextensions.fn.proto.v1beta1.RunFunctionRequest.input:type_name -> google.protobuf.Struct - 22, // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest.context:type_name -> google.protobuf.Struct - 15, // 5: apiextensions.fn.proto.v1beta1.RunFunctionRequest.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry - 16, // 6: apiextensions.fn.proto.v1beta1.RunFunctionRequest.credentials:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry - 4, // 7: apiextensions.fn.proto.v1beta1.Credentials.credential_data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData - 17, // 8: apiextensions.fn.proto.v1beta1.CredentialData.data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData.DataEntry - 13, // 9: apiextensions.fn.proto.v1beta1.Resources.items:type_name -> apiextensions.fn.proto.v1beta1.Resource - 11, // 10: apiextensions.fn.proto.v1beta1.RunFunctionResponse.meta:type_name -> apiextensions.fn.proto.v1beta1.ResponseMeta - 12, // 11: apiextensions.fn.proto.v1beta1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1beta1.State - 14, // 12: apiextensions.fn.proto.v1beta1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1beta1.Result - 22, // 13: apiextensions.fn.proto.v1beta1.RunFunctionResponse.context:type_name -> google.protobuf.Struct - 8, // 14: apiextensions.fn.proto.v1beta1.RunFunctionResponse.requirements:type_name -> apiextensions.fn.proto.v1beta1.Requirements - 18, // 15: apiextensions.fn.proto.v1beta1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry - 10, // 16: apiextensions.fn.proto.v1beta1.ResourceSelector.match_labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels - 19, // 17: apiextensions.fn.proto.v1beta1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry - 23, // 18: apiextensions.fn.proto.v1beta1.ResponseMeta.ttl:type_name -> google.protobuf.Duration - 13, // 19: apiextensions.fn.proto.v1beta1.State.composite:type_name -> apiextensions.fn.proto.v1beta1.Resource - 20, // 20: apiextensions.fn.proto.v1beta1.State.resources:type_name -> apiextensions.fn.proto.v1beta1.State.ResourcesEntry - 22, // 21: apiextensions.fn.proto.v1beta1.Resource.resource:type_name -> google.protobuf.Struct - 21, // 22: apiextensions.fn.proto.v1beta1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry - 0, // 23: apiextensions.fn.proto.v1beta1.Resource.ready:type_name -> apiextensions.fn.proto.v1beta1.Ready - 1, // 24: apiextensions.fn.proto.v1beta1.Result.severity:type_name -> apiextensions.fn.proto.v1beta1.Severity - 5, // 25: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry.value:type_name -> 
apiextensions.fn.proto.v1beta1.Resources - 3, // 26: apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Credentials - 9, // 27: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.ResourceSelector - 13, // 28: apiextensions.fn.proto.v1beta1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resource - 2, // 29: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1beta1.RunFunctionRequest - 6, // 30: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1beta1.RunFunctionResponse - 30, // [30:31] is the sub-list for method output_type - 29, // [29:30] is the sub-list for method input_type - 29, // [29:29] is the sub-list for extension type_name - 29, // [29:29] is the sub-list for extension extendee - 0, // [0:29] is the sub-list for field type_name + 9, // 0: apiextensions.fn.proto.v1beta1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1beta1.RequestMeta + 14, // 1: apiextensions.fn.proto.v1beta1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1beta1.State + 14, // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1beta1.State + 25, // 3: apiextensions.fn.proto.v1beta1.RunFunctionRequest.input:type_name -> google.protobuf.Struct + 25, // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest.context:type_name -> google.protobuf.Struct + 18, // 5: apiextensions.fn.proto.v1beta1.RunFunctionRequest.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry + 19, // 6: apiextensions.fn.proto.v1beta1.RunFunctionRequest.credentials:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry + 6, // 7: apiextensions.fn.proto.v1beta1.Credentials.credential_data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData + 20, // 8: apiextensions.fn.proto.v1beta1.CredentialData.data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData.DataEntry + 15, // 9: apiextensions.fn.proto.v1beta1.Resources.items:type_name -> apiextensions.fn.proto.v1beta1.Resource + 13, // 10: apiextensions.fn.proto.v1beta1.RunFunctionResponse.meta:type_name -> apiextensions.fn.proto.v1beta1.ResponseMeta + 14, // 11: apiextensions.fn.proto.v1beta1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1beta1.State + 16, // 12: apiextensions.fn.proto.v1beta1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1beta1.Result + 25, // 13: apiextensions.fn.proto.v1beta1.RunFunctionResponse.context:type_name -> google.protobuf.Struct + 10, // 14: apiextensions.fn.proto.v1beta1.RunFunctionResponse.requirements:type_name -> apiextensions.fn.proto.v1beta1.Requirements + 17, // 15: apiextensions.fn.proto.v1beta1.RunFunctionResponse.conditions:type_name -> apiextensions.fn.proto.v1beta1.Condition + 21, // 16: apiextensions.fn.proto.v1beta1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry + 12, // 17: apiextensions.fn.proto.v1beta1.ResourceSelector.match_labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels + 22, // 18: apiextensions.fn.proto.v1beta1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry + 26, // 19: apiextensions.fn.proto.v1beta1.ResponseMeta.ttl:type_name -> google.protobuf.Duration + 15, // 20: 
apiextensions.fn.proto.v1beta1.State.composite:type_name -> apiextensions.fn.proto.v1beta1.Resource + 23, // 21: apiextensions.fn.proto.v1beta1.State.resources:type_name -> apiextensions.fn.proto.v1beta1.State.ResourcesEntry + 25, // 22: apiextensions.fn.proto.v1beta1.Resource.resource:type_name -> google.protobuf.Struct + 24, // 23: apiextensions.fn.proto.v1beta1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + 0, // 24: apiextensions.fn.proto.v1beta1.Resource.ready:type_name -> apiextensions.fn.proto.v1beta1.Ready + 1, // 25: apiextensions.fn.proto.v1beta1.Result.severity:type_name -> apiextensions.fn.proto.v1beta1.Severity + 2, // 26: apiextensions.fn.proto.v1beta1.Result.target:type_name -> apiextensions.fn.proto.v1beta1.Target + 3, // 27: apiextensions.fn.proto.v1beta1.Condition.status:type_name -> apiextensions.fn.proto.v1beta1.Status + 2, // 28: apiextensions.fn.proto.v1beta1.Condition.target:type_name -> apiextensions.fn.proto.v1beta1.Target + 7, // 29: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resources + 5, // 30: apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Credentials + 11, // 31: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.ResourceSelector + 15, // 32: apiextensions.fn.proto.v1beta1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resource + 4, // 33: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1beta1.RunFunctionRequest + 8, // 34: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1beta1.RunFunctionResponse + 34, // [34:35] is the sub-list for method output_type + 33, // [33:34] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name } func init() { file_apiextensions_fn_proto_v1beta1_run_function_proto_init() } @@ -1527,6 +1804,18 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Condition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0].OneofWrappers = []interface{}{} file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1].OneofWrappers = []interface{}{ @@ -1538,13 +1827,15 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { (*ResourceSelector_MatchLabels)(nil), } file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[12].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[13].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc, - NumEnums: 2, - NumMessages: 20, + NumEnums: 4, + NumMessages: 21, NumExtensions: 0, NumServices: 1, }, diff --git 
a/apis/apiextensions/fn/proto/v1beta1/run_function.proto b/apis/apiextensions/fn/proto/v1beta1/run_function.proto index 2a6e0b1ef..18060fb3c 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.proto +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.proto @@ -121,6 +121,10 @@ message RunFunctionResponse { // Requirements that must be satisfied for this Function to run successfully. Requirements requirements = 5; + + // Status conditions to be applied to the composite resource. Conditions may also + // optionally be applied to the composite resource's associated claim. + repeated Condition conditions = 6; } // RequestMeta contains metadata pertaining to a RunFunctionRequest. @@ -243,6 +247,13 @@ message Result { // Human-readable details about the result. string message = 2; + + // Optional PascalCase, machine-readable reason for this result. If omitted, + // the value will be ComposeResources. + optional string reason = 3; + + // The resources this result targets. + optional Target target = 4; } // Severity of Function results. @@ -262,4 +273,54 @@ enum Severity { // Normal results are emitted as normal events and debug logs associated // with the composite resource. SEVERITY_NORMAL = 3; -} \ No newline at end of file +} + +// Target of Function results and conditions. +enum Target { + // If the target is unspecified, the result targets the composite resource. + TARGET_UNSPECIFIED = 0; + + // Target the composite resource. Results that target the composite resource + // should include detailed, advanced information. + TARGET_COMPOSITE = 1; + + // Target the composite and the claim. Results that target the composite and + // the claim should include only end-user friendly information. + TARGET_COMPOSITE_AND_CLAIM = 2; +} + +// Status condition to be applied to the composite resource. Condition may also +// optionally be applied to the composite resource's associated claim. For +// detailed information on proper usage of status conditions, please see +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties. +message Condition { + // Type of condition in PascalCase. + string type = 1; + + // Status of the condition. + Status status = 2; + + // Reason contains a programmatic identifier indicating the reason for the + // condition's last transition. Producers of specific condition types may + // define expected values and meanings for this field, and whether the values + // are considered a guaranteed API. The value should be a PascalCase string. + // This field may not be empty. + string reason = 3; + + // Message is a human readable message indicating details about the + // transition. This may be an empty string. + optional string message = 4; + + // The resources this condition targets. 
+ optional Target target = 5; +} + +enum Status { + STATUS_CONDITION_UNSPECIFIED = 0; + + STATUS_CONDITION_UNKNOWN = 1; + + STATUS_CONDITION_TRUE = 2; + + STATUS_CONDITION_FALSE = 3; +} diff --git a/cmd/crank/beta/validate/validate_test.go b/cmd/crank/beta/validate/validate_test.go index 9b6511a39..cb252df13 100644 --- a/cmd/crank/beta/validate/validate_test.go +++ b/cmd/crank/beta/validate/validate_test.go @@ -526,6 +526,15 @@ func TestConvertToCRDs(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -869,6 +878,15 @@ func TestConvertToCRDs(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -1113,6 +1131,15 @@ func TestConvertToCRDs(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ diff --git a/go.mod b/go.mod index 9b2287b96..4a107f8dd 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 github.com/Masterminds/semver v1.5.0 github.com/alecthomas/kong v0.9.0 - github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240520203451-fc036618ffd8 + github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240628014613-063a0273907b github.com/docker/docker v25.0.5+incompatible github.com/docker/go-connections v0.5.0 github.com/emicklei/dot v1.6.2 @@ -120,7 +120,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.12.10 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 // indirect - github.com/aws/smithy-go v1.20.2 + github.com/aws/smithy-go v1.20.2 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -134,11 +134,11 @@ require ( github.com/docker/docker-credential-helpers v0.8.2 github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.16.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect diff --git a/go.sum b/go.sum index f3109f562..f5dd9f816 100644 --- a/go.sum +++ b/go.sum @@ -125,8 +125,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod 
h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240520203451-fc036618ffd8 h1:4OtdWor2ixE1Nk+96//Knf63wRiyKFnxFcJx4iM+WY0= -github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240520203451-fc036618ffd8/go.mod h1:aZ1ODIvtOPFFVZ9oo3qVH/MQHuim24qWg2Tj6n+AIf4= +github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240628014613-063a0273907b h1:XNYG9Px6WsZ8OvfZ/hPDtnglK2jAmmkox1/JUZjXE9I= +github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240628014613-063a0273907b/go.mod h1:saPoKGl3PfzzL8Q6PH+/cIjD0ssrmHW/gmiqstWy+0Q= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= @@ -169,8 +169,8 @@ github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= @@ -195,8 +195,8 @@ github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= diff --git a/internal/controller/apiextensions/claim/reconciler.go b/internal/controller/apiextensions/claim/reconciler.go index be4cc7d05..d94c9883a 100644 --- a/internal/controller/apiextensions/claim/reconciler.go +++ b/internal/controller/apiextensions/claim/reconciler.go @@ -473,6 +473,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco cm.SetConditions(xpv1.ReconcileSuccess()) + // Copy any custom status conditions from the XR to the claim. 
+ for _, cType := range xr.GetClaimConditionTypes() { + c := xr.GetCondition(cType) + cm.SetConditions(c) + } + if !resource.IsConditionTrue(xr.GetCondition(xpv1.TypeReady)) { record.Event(cm, event.Normal(reasonBind, "Composite resource is not yet ready")) diff --git a/internal/controller/apiextensions/claim/reconciler_test.go b/internal/controller/apiextensions/claim/reconciler_test.go index f8a1ff899..919bae6de 100644 --- a/internal/controller/apiextensions/claim/reconciler_test.go +++ b/internal/controller/apiextensions/claim/reconciler_test.go @@ -535,6 +535,93 @@ func TestReconcile(t *testing.T) { r: reconcile.Result{Requeue: false}, }, }, + "ClaimConditions": { + reason: "We should copy custom conditions from the XR if seen in the claimConditions array.", + args: args{ + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + // The system conditions are already set. + o.SetConditions(xpv1.ReconcileSuccess()) + o.SetConditions(xpv1.Available()) + // Database was marked as creating in a prior reconciliation. + o.SetConditions(xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionFalse, + Reason: "Creating", + }) + case *composite.Unstructured: + // Pretend the XR exists and is available. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + o.SetConditions(xpv1.Available()) + o.SetConditions( + // Database has become ready. + xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + }, + // Bucket is a new condition. + xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionFalse, + Reason: "Creating", + }, + // Internal condition should not be copied over as it is not in + // claimConditions. + xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionFalse, + Reason: "Syncing", + }, + ) + // Database and Bucket are claim conditions so they should be + // copied over. + o.SetClaimConditionTypes("DatabaseReady", "BucketReady") + } + return nil + }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConnectionDetailsLastPublishedTime(&now) + cm.SetConditions(xpv1.ReconcileSuccess()) + cm.SetConditions(xpv1.Available()) + cm.SetConditions( + // Database condition should have been updated to show ready. + xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + }, + // Bucket condition should have been created. 
+ xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionFalse, + Reason: "Creating", + }, + ) + })), + }, + opts: []ReconcilerOption{ + WithClaimFinalizer(resource.FinalizerFns{ + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, + }), + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return nil })), + WithConnectionPropagator(ConnectionPropagatorFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ resource.ConnectionSecretOwner) (propagated bool, err error) { + return true, nil + })), + }, + }, + want: want{ + r: reconcile.Result{Requeue: false}, + }, + }, } for name, tc := range cases { diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index d3b214c65..d44210a04 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/json" "sigs.k8s.io/controller-runtime/pkg/client" + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/event" "github.com/crossplane/crossplane-runtime/pkg/meta" @@ -291,7 +292,8 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur // The Function pipeline starts with empty desired state. d := &v1beta1.State{} - events := []event.Event{} + events := []TargetedEvent{} + conditions := []TargetedCondition{} // The Function context starts empty... fctx := &structpb.Struct{Fields: map[string]*structpb.Value{}} @@ -397,22 +399,58 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur // We intentionally discard/ignore this after the last Function runs. fctx = rsp.GetContext() + for _, c := range rsp.GetConditions() { + var status corev1.ConditionStatus + switch c.GetStatus() { + case v1beta1.Status_STATUS_CONDITION_TRUE: + status = corev1.ConditionTrue + case v1beta1.Status_STATUS_CONDITION_FALSE: + status = corev1.ConditionFalse + case v1beta1.Status_STATUS_CONDITION_UNKNOWN, v1beta1.Status_STATUS_CONDITION_UNSPECIFIED: + status = corev1.ConditionUnknown + } + + conditions = append(conditions, TargetedCondition{ + Condition: xpv1.Condition{ + Type: xpv1.ConditionType(c.GetType()), + Status: status, + LastTransitionTime: metav1.Now(), + Reason: xpv1.ConditionReason(c.GetReason()), + Message: c.GetMessage(), + }, + Target: convertTarget(c.GetTarget()), + }) + } + // Results of fatal severity stop the Composition process. Other results // are accumulated to be emitted as events by the Reconciler. 
for _, rs := range rsp.GetResults() { + reason := event.Reason(rs.GetReason()) + if reason == "" { + reason = reasonCompose + } + + e := TargetedEvent{Target: convertTarget(rs.GetTarget())} + switch rs.GetSeverity() { case v1beta1.Severity_SEVERITY_FATAL: - return CompositionResult{}, errors.Errorf(errFmtFatalResult, fn.Step, rs.GetMessage()) + return CompositionResult{Events: events, Conditions: conditions}, errors.Errorf(errFmtFatalResult, fn.Step, rs.GetMessage()) case v1beta1.Severity_SEVERITY_WARNING: - events = append(events, event.Warning(reasonCompose, errors.Errorf("Pipeline step %q: %s", fn.Step, rs.GetMessage()))) + e.Event = event.Warning(reason, errors.New(rs.GetMessage())) + e.Detail = fmt.Sprintf("Pipeline step %q", fn.Step) case v1beta1.Severity_SEVERITY_NORMAL: - events = append(events, event.Normal(reasonCompose, fmt.Sprintf("Pipeline step %q: %s", fn.Step, rs.GetMessage()))) + e.Event = event.Normal(reason, rs.GetMessage()) + e.Detail = fmt.Sprintf("Pipeline step %q", fn.Step) case v1beta1.Severity_SEVERITY_UNSPECIFIED: // We could hit this case if a Function was built against a newer // protobuf than this build of Crossplane, and the new protobuf // introduced a severity that we don't know about. - events = append(events, event.Warning(reasonCompose, errors.Errorf("Pipeline step %q returned a result of unknown severity (assuming warning): %s", fn.Step, rs.GetMessage()))) + e.Event = event.Warning(reason, errors.Errorf("Pipeline step %q returned a result of unknown severity (assuming warning): %s", fn.Step, rs.GetMessage())) + // Explicitly target only the XR, since we're including information + // about an exceptional, unexpected state. + e.Target = CompositionTargetComposite } + events = append(events, e) } } @@ -531,7 +569,10 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur // We mark the resource as not synced, so that once we get to // decide the XR's Synced condition, we can set it to false if // any of the resources didn't sync successfully. - events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errFmtApplyCD, name))) + events = append(events, TargetedEvent{ + Event: event.Warning(reasonCompose, errors.Wrapf(err, errFmtApplyCD, name)), + Target: CompositionTargetComposite, + }) // NOTE(phisco): here we behave differently w.r.t. the native // p&t composer, as we respect the readiness reported by // functions, while there we defaulted to also set ready false @@ -572,7 +613,7 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur return CompositionResult{}, errors.Wrap(err, errApplyXRStatus) } - return CompositionResult{ConnectionDetails: d.GetComposite().GetConnectionDetails(), Composed: resources, Events: events}, nil + return CompositionResult{ConnectionDetails: d.GetComposite().GetConnectionDetails(), Composed: resources, Events: events, Conditions: conditions}, nil } // ComposedFieldOwnerName generates a unique field owner name @@ -935,3 +976,10 @@ func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client. 
return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), "cannot clear field managers") } } + +func convertTarget(t v1beta1.Target) CompositionTarget { + if t == v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM { + return CompositionTargetCompositeAndClaim + } + return CompositionTargetComposite +} diff --git a/internal/controller/apiextensions/composite/composition_functions_test.go b/internal/controller/apiextensions/composite/composition_functions_test.go index 062c71f4b..40aafeef1 100644 --- a/internal/controller/apiextensions/composite/composition_functions_test.go +++ b/internal/controller/apiextensions/composite/composition_functions_test.go @@ -229,14 +229,57 @@ func TestFunctionCompose(t *testing.T) { }, }, "FatalFunctionResultError": { - reason: "We should return any fatal function results as an error", + reason: "We should return any fatal function results as an error. Any conditions returned by the function should be passed up. Any results returned by the function prior to the fatal result should be passed up.", params: params{ r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - r := &v1beta1.Result{ - Severity: v1beta1.Severity_SEVERITY_FATAL, - Message: "oh no", - } - return &v1beta1.RunFunctionResponse{Results: []*v1beta1.Result{r}}, nil + return &v1beta1.RunFunctionResponse{ + Results: []*v1beta1.Result{ + // This result should be passed up as it was sent before the fatal + // result. The reason should be defaulted. The target should be + // defaulted. + { + Severity: v1beta1.Severity_SEVERITY_NORMAL, + Message: "A result before the fatal result with the default Reason.", + }, + // This result should be passed up as it was sent before the fatal + // result. The reason should be kept. The target should be kept. + { + Severity: v1beta1.Severity_SEVERITY_NORMAL, + Reason: ptr.To("SomeReason"), + Message: "A result before the fatal result with a specific Reason.", + Target: v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + }, + // The fatal result + { + Severity: v1beta1.Severity_SEVERITY_FATAL, + Message: "oh no", + }, + // This result should not be passed up as it was sent after the + // fatal result. + { + Severity: v1beta1.Severity_SEVERITY_NORMAL, + Message: "a result after the fatal result", + }, + }, + Conditions: []*v1beta1.Condition{ + // A condition returned by the function with only the minimum + // necessary values. + { + Type: "DatabaseReady", + Status: v1beta1.Status_STATUS_CONDITION_FALSE, + Reason: "Creating", + }, + // A condition returned by the function with all optional values + // given. + { + Type: "DeploymentReady", + Status: v1beta1.Status_STATUS_CONDITION_TRUE, + Reason: "Available", + Message: ptr.To("The deployment is ready."), + Target: v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + }, + }, + }, nil }), o: []FunctionComposerOption{ WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { @@ -264,6 +307,51 @@ func TestFunctionCompose(t *testing.T) { }, want: want{ err: errors.Errorf(errFmtFatalResult, "run-cool-function", "oh no"), + res: CompositionResult{ + Events: []TargetedEvent{ + // The event with minimum values. 
+ { + Event: event.Event{ + Type: "Normal", + Reason: "ComposeResources", + Message: "A result before the fatal result with the default Reason.", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetComposite, + }, + // The event that provides all possible values. + { + Event: event.Event{ + Type: "Normal", + Reason: "SomeReason", + Message: "A result before the fatal result with a specific Reason.", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetCompositeAndClaim, + }, + }, + Conditions: []TargetedCondition{ + // The condition with minimum values. + { + Condition: xpv1.Condition{ + Type: "DatabaseReady", + Status: "False", + Reason: "Creating", + }, + Target: CompositionTargetComposite, + }, + // The condition that provides all possible values. + { + Condition: xpv1.Condition{ + Type: "DeploymentReady", + Status: "True", + Reason: "Available", + Message: "The deployment is ready.", + }, + Target: CompositionTargetCompositeAndClaim, + }, + }, + }, }, }, "RenderComposedResourceMetadataError": { @@ -620,6 +708,30 @@ func TestFunctionCompose(t *testing.T) { Severity: v1beta1.Severity_SEVERITY_UNSPECIFIED, Message: "A result of unspecified severity", }, + { + Severity: v1beta1.Severity_SEVERITY_NORMAL, + Reason: ptr.To("SomeReason"), + Message: "A result with all values explicitly set.", + Target: v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + }, + }, + Conditions: []*v1beta1.Condition{ + // A condition returned by the function with only the minimum + // necessary values. + { + Type: "DatabaseReady", + Status: v1beta1.Status_STATUS_CONDITION_FALSE, + Reason: "Creating", + }, + // A condition returned by the function with all optional values + // given. + { + Type: "DeploymentReady", + Status: v1beta1.Status_STATUS_CONDITION_TRUE, + Reason: "Available", + Message: ptr.To("The deployment is ready."), + Target: v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + }, }, } return rsp, nil @@ -692,21 +804,62 @@ func TestFunctionCompose(t *testing.T) { ConnectionDetails: managed.ConnectionDetails{ "from": []byte("function-pipeline"), }, - Events: []event.Event{ + Events: []TargetedEvent{ + { + Event: event.Event{ + Type: "Normal", + Reason: "ComposeResources", + Message: "A normal result", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetComposite, + }, + { + Event: event.Event{ + Type: "Warning", + Reason: "ComposeResources", + Message: "A warning result", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetComposite, + }, { - Type: "Normal", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\": A normal result", + Event: event.Event{ + Type: "Warning", + Reason: "ComposeResources", + Message: "Pipeline step \"run-cool-function\" returned a result of unknown severity (assuming warning): A result of unspecified severity", + }, + Target: CompositionTargetComposite, + }, + { + Event: event.Event{ + Type: "Normal", + Reason: "SomeReason", + Message: "A result with all values explicitly set.", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetCompositeAndClaim, }, + }, + Conditions: []TargetedCondition{ + // The condition with minimum values. 
{ - Type: "Warning", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\": A warning result", + Condition: xpv1.Condition{ + Type: "DatabaseReady", + Status: "False", + Reason: "Creating", + }, + Target: CompositionTargetComposite, }, + // The condition that provides all possible values. { - Type: "Warning", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\" returned a result of unknown severity (assuming warning): A result of unspecified severity", + Condition: xpv1.Condition{ + Type: "DeploymentReady", + Status: "True", + Reason: "Available", + Message: "The deployment is ready.", + }, + Target: CompositionTargetCompositeAndClaim, }, }, }, @@ -785,6 +938,13 @@ func TestFunctionCompose(t *testing.T) { Severity: v1beta1.Severity_SEVERITY_UNSPECIFIED, Message: "A result of unspecified severity", }, + { + // If the severity is unknown, the target should be force set + // to target only the XR. + Severity: v1beta1.Severity_SEVERITY_UNSPECIFIED, + Message: "A result of unspecified severity targeting the claim should be forced to only target the XR.", + Target: v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + }, }, Requirements: requirements, } @@ -893,21 +1053,42 @@ func TestFunctionCompose(t *testing.T) { ConnectionDetails: managed.ConnectionDetails{ "from": []byte("function-pipeline"), }, - Events: []event.Event{ + Events: []TargetedEvent{ + { + Event: event.Event{ + Type: "Normal", + Reason: "ComposeResources", + Message: "A normal result", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetComposite, + }, { - Type: "Normal", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\": A normal result", + Event: event.Event{ + Type: "Warning", + Reason: "ComposeResources", + Message: "A warning result", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetComposite, }, { - Type: "Warning", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\": A warning result", + Event: event.Event{ + Type: "Warning", + Reason: "ComposeResources", + Message: "Pipeline step \"run-cool-function\" returned a result of unknown severity (assuming warning): A result of unspecified severity", + }, + Target: CompositionTargetComposite, }, + // If the severity is unknown, the target should be force set + // to target only the XR. 
{ - Type: "Warning", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\" returned a result of unknown severity (assuming warning): A result of unspecified severity", + Event: event.Event{ + Type: "Warning", + Reason: "ComposeResources", + Message: "Pipeline step \"run-cool-function\" returned a result of unknown severity (assuming warning): A result of unspecified severity targeting the claim should be forced to only target the XR.", + }, + Target: CompositionTargetComposite, }, }, }, diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index 45e8c7fcc..293952528 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -183,7 +183,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re } } - events := make([]event.Event, 0) + events := make([]TargetedEvent, 0) // We optimistically render all composed resources that we are able to with // the expectation that any that we fail to render will subsequently have @@ -213,17 +213,26 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re rendered := true if err := RenderFromCompositeAndEnvironmentPatches(r, xr, req.Environment, ta.Template.Patches); err != nil { - events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errFmtRenderFromCompositePatches, name))) + events = append(events, TargetedEvent{ + Event: event.Warning(reasonCompose, errors.Wrapf(err, errFmtRenderFromCompositePatches, name)), + Target: CompositionTargetComposite, + }) rendered = false } if err := RenderComposedResourceMetadata(r, xr, ResourceName(ptr.Deref(ta.Template.Name, ""))); err != nil { - events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errFmtRenderMetadata, name))) + events = append(events, TargetedEvent{ + Event: event.Warning(reasonCompose, errors.Wrapf(err, errFmtRenderMetadata, name)), + Target: CompositionTargetComposite, + }) rendered = false } if err := c.composed.GenerateName(ctx, r); err != nil { - events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errFmtGenerateName, name))) + events = append(events, TargetedEvent{ + Event: event.Warning(reasonCompose, errors.Wrapf(err, errFmtGenerateName, name)), + Target: CompositionTargetComposite, + }) rendered = false } @@ -274,7 +283,10 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re // run again the composition after some other resource is // created or updated successfully. So, we emit a warning event // and move on. - events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errFmtApplyComposed, ptr.Deref(t.Name, fmt.Sprintf("%d", i+1))))) + events = append(events, TargetedEvent{ + Event: event.Warning(reasonCompose, errors.Wrapf(err, errFmtApplyComposed, ptr.Deref(t.Name, fmt.Sprintf("%d", i+1)))), + Target: CompositionTargetComposite, + }) // We unset the cd here so that we don't try to observe it // later. This will also mean we report it as not ready and not // synced. 
Resulting in the XR being reported as not ready nor diff --git a/internal/controller/apiextensions/composite/composition_pt_test.go b/internal/controller/apiextensions/composite/composition_pt_test.go index efb8e3059..edc2fb29a 100644 --- a/internal/controller/apiextensions/composite/composition_pt_test.go +++ b/internal/controller/apiextensions/composite/composition_pt_test.go @@ -467,8 +467,11 @@ func TestPTCompose(t *testing.T) { }, }, ConnectionDetails: details, - Events: []event.Event{ - event.Warning(reasonCompose, errors.Wrapf(errBoom, errFmtGenerateName, "uncool-resource")), + Events: []TargetedEvent{ + { + Event: event.Warning(reasonCompose, errors.Wrapf(errBoom, errFmtGenerateName, "uncool-resource")), + Target: CompositionTargetComposite, + }, }, }, }, diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index 47c13396c..64d36b373 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -24,9 +24,11 @@ import ( "strconv" "time" + corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -38,6 +40,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/claim" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" @@ -72,6 +75,8 @@ const ( errInvalidResources = "some resources were invalid, check events" errRenderCD = "cannot render composed resource" errSyncResources = "cannot sync composed resources" + errGetClaim = "cannot get referenced claim" + errParseClaimRef = "cannot parse claim reference" reconcilePausedMsg = "Reconciliation (including deletion) is paused via the pause annotation" ) @@ -86,6 +91,11 @@ const ( reasonPaused event.Reason = "ReconciliationPaused" ) +// Condition reasons. +const ( + reasonFatalError xpv1.ConditionReason = "FatalError" +) + // ControllerName returns the recommended name for controllers that use this // package to reconcile a particular kind of composite resource. func ControllerName(name string) string { @@ -185,7 +195,48 @@ type CompositionRequest struct { type CompositionResult struct { Composed []ComposedResource ConnectionDetails managed.ConnectionDetails - Events []event.Event + Events []TargetedEvent + Conditions []TargetedCondition +} + +// A CompositionTarget is the target of a composition event or condition. +type CompositionTarget string + +// Composition event and condition targets. +const ( + CompositionTargetComposite CompositionTarget = "Composite" + CompositionTargetCompositeAndClaim CompositionTarget = "CompositeAndClaim" +) + +// A TargetedEvent represents an event produced by the composition process. It +// can target either the XR only, or both the XR and the claim. +type TargetedEvent struct { + event.Event + Target CompositionTarget + // Detail about the event to be included in the composite resource event but + // not the claim. + Detail string +} + +// AsEvent produces the base event. 
+func (e *TargetedEvent) AsEvent() event.Event { + return event.Event{Type: e.Type, Reason: e.Reason, Message: e.Message, Annotations: e.Annotations} +} + +// AsDetailedEvent produces an event with additional detail if available. +func (e *TargetedEvent) AsDetailedEvent() event.Event { + if e.Detail == "" { + return e.AsEvent() + } + msg := fmt.Sprintf("%s: %s", e.Detail, e.Message) + return event.Event{Type: e.Type, Reason: e.Reason, Message: msg, Annotations: e.Annotations} +} + +// A TargetedCondition represents a condition produced by the composition +// process. It can target either the XR only, or both the XR and the claim. +type TargetedCondition struct { + xpv1.Condition + Target CompositionTarget } // A Composer composes (i.e. creates, updates, or deletes) resources given the @@ -608,6 +659,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco if kerrors.IsConflict(err) { return reconcile.Result{Requeue: true}, nil } + err = errors.Wrap(err, errCompose) r.record.Event(xr, event.Warning(reasonCompose, err)) if kerrors.IsInvalid(err) { @@ -621,6 +673,22 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco err = errors.Wrap(errors.New(errInvalidResources), errCompose) } xr.SetConditions(xpv1.ReconcileError(err)) + + meta := r.handleCommonCompositionResult(ctx, res, xr) + // We encountered a fatal error. For any custom status conditions that were + // not received due to the fatal error, mark them as unknown. + for _, c := range xr.GetConditions() { + if xpv1.IsSystemConditionType(c.Type) { + continue + } + if !meta.conditionTypesSeen[c.Type] { + c.Status = corev1.ConditionUnknown + c.Reason = reasonFatalError + c.Message = "A fatal error occurred before the status of this condition could be determined." + xr.SetConditions(c) + } + } + return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) } @@ -654,16 +722,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco r.record.Event(xr, event.Normal(reasonPublish, "Successfully published connection details")) } - warnings := 0 - for _, e := range res.Events { - if e.Type == event.TypeWarning { - warnings++ - } - log.Debug(e.Message) - r.record.Event(xr, e) - } + meta := r.handleCommonCompositionResult(ctx, res, xr) - if warnings == 0 { + if meta.numWarningEvents == 0 { // We don't consider warnings severe enough to prevent the XR from being // considered synced (i.e. 
severe enough to return a ReconcileError) but // they are severe enough that we probably shouldn't say we successfully @@ -736,3 +797,74 @@ func getComposerResourcesNames(cds []ComposedResource) []string { } return names } + +type compositionResultMeta struct { + numWarningEvents int + conditionTypesSeen map[xpv1.ConditionType]bool +} + +func (r *Reconciler) handleCommonCompositionResult(ctx context.Context, res CompositionResult, xr *composite.Unstructured) compositionResultMeta { + log := r.log.WithValues( + "uid", xr.GetUID(), + "version", xr.GetResourceVersion(), + "name", xr.GetName(), + ) + + cm, err := getClaimFromXR(ctx, r.client, xr) + if err != nil { + log.Debug(errGetClaim, "error", err) + } + + numWarningEvents := 0 + for _, e := range res.Events { + if e.Event.Type == event.TypeWarning { + numWarningEvents++ + } + + detailedEvent := e.AsDetailedEvent() + log.Debug(detailedEvent.Message) + r.record.Event(xr, detailedEvent) + + if e.Target == CompositionTargetCompositeAndClaim && cm != nil { + r.record.Event(cm, e.AsEvent()) + } + } + + conditionTypesSeen := make(map[xpv1.ConditionType]bool) + for _, c := range res.Conditions { + if xpv1.IsSystemConditionType(c.Condition.Type) { + // Do not let users update system conditions. + continue + } + conditionTypesSeen[c.Condition.Type] = true + xr.SetConditions(c.Condition) + if c.Target == CompositionTargetCompositeAndClaim { + // We can ignore the error as it only occurs if given a system condition. + _ = xr.SetClaimConditionTypes(c.Condition.Type) + } + } + + return compositionResultMeta{ + numWarningEvents: numWarningEvents, + conditionTypesSeen: conditionTypesSeen, + } +} + +func getClaimFromXR(ctx context.Context, c client.Client, xr *composite.Unstructured) (*claim.Unstructured, error) { + if xr.GetClaimReference() == nil { + return nil, nil + } + + gv, err := schema.ParseGroupVersion(xr.GetClaimReference().APIVersion) + if err != nil { + return nil, errors.Wrap(err, errParseClaimRef) + } + + claimGVK := gv.WithKind(xr.GetClaimReference().Kind) + cm := claim.New(claim.WithGroupVersionKind(claimGVK)) + claimNN := types.NamespacedName{Namespace: xr.GetClaimReference().Namespace, Name: xr.GetClaimReference().Name} + if err := c.Get(ctx, claimNN, cm); err != nil { + return nil, errors.Wrap(err, errGetClaim) + } + return cm, nil +} diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index 6d68d15c1..4fa0dce53 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ b/internal/controller/apiextensions/composite/reconciler_test.go @@ -18,6 +18,7 @@ package composite import ( "context" + "fmt" "testing" "time" @@ -26,6 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -36,6 +38,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/claim" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" "github.com/crossplane/crossplane-runtime/pkg/test" @@ -446,7 +449,12 @@ func 
TestReconcile(t *testing.T) { })), WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{ - Events: []event.Event{event.Warning("Warning", errBoom)}, + Events: []TargetedEvent{ + { + Event: event.Warning("Warning", errBoom), + Target: CompositionTargetComposite, + }, + }, }, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ @@ -714,6 +722,588 @@ func TestReconcile(t *testing.T) { r: reconcile.Result{RequeueAfter: defaultPollInterval}, }, }, + "CustomEventsAndConditions": { + reason: "We should emit custom events and set custom conditions that were returned by the composer on both the composite resource and the claim.", + args: args{ + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + if xr, ok := obj.(*composite.Unstructured); ok { + // non-nil claim ref to trigger claim Get() + xr.SetClaimReference(&claim.Reference{}) + return nil + } + if cm, ok := obj.(*claim.Unstructured); ok { + claim.New(claim.WithGroupVersionKind(schema.GroupVersionKind{})).DeepCopyInto(cm) + return nil + } + return nil + }), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions( + xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "This is a condition for database availability.", + }, + xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + xpv1.ReconcileSuccess(), + xpv1.Available(), + ) + cr.(*composite.Unstructured).SetClaimConditionTypes("DatabaseReady") + cr.SetClaimReference(&claim.Reference{}) + })), + }, + opts: []ReconcilerOption{ + WithRecorder(newTestRecorder( + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "SelectComposition", + Message: "Successfully selected composition: ", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + // The composite should have the "Pipeline step" prefix. + Message: "Pipeline step \"some-function\": This is an event for database availability.", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: claimKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + // The claim should not have the "Pipeline step" prefix. 
+ Message: "This is an event for database availability.", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "SyncSuccess", + Message: "Pipeline step \"some-function\": Internal sync was successful.", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "ComposeResources", + Message: "Successfully composed resources", + Annotations: map[string]string{}, + }, + }, + )), + WithCompositeFinalizer(resource.NewNopFinalizer()), + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + cr.SetCompositionReference(&corev1.ObjectReference{}) + return nil + })), + WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { + return &v1.CompositionRevision{}, nil + })), + WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { + return nil + })), + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { + return CompositionResult{ + Composed: []ComposedResource{}, + ConnectionDetails: cd, + Events: []TargetedEvent{ + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + Message: "This is an event for database availability.", + Annotations: map[string]string{}, + }, + Detail: "Pipeline step \"some-function\"", + Target: CompositionTargetCompositeAndClaim, + }, + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "SyncSuccess", + Message: "Internal sync was successful.", + Annotations: map[string]string{}, + }, + Detail: "Pipeline step \"some-function\"", + Target: CompositionTargetComposite, + }, + }, + Conditions: []TargetedCondition{ + { + Condition: xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "This is a condition for database availability.", + }, + Target: CompositionTargetCompositeAndClaim, + }, + { + Condition: xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + Target: CompositionTargetComposite, + }, + }, + }, nil + })), + }, + }, + want: want{ + r: reconcile.Result{RequeueAfter: defaultPollInterval}, + }, + }, + "CustomEventsAndConditionFatal": { + reason: "In the case of a fatal result from the composer, we should set all custom conditions that were seen. If any custom conditions were not seen, they should be marked as Unknown. 
The error message should be emitted as an event to the composite but not the claim.", + args: args{ + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + if xr, ok := obj.(*composite.Unstructured); ok { + // non-nil claim ref to trigger claim Get() + xr.SetClaimReference(&claim.Reference{}) + xr.SetConditions(xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "This is a condition for database availability.", + }) + xr.SetClaimConditionTypes("DatabaseReady") + return nil + } + if cm, ok := obj.(*claim.Unstructured); ok { + claim.New(claim.WithGroupVersionKind(schema.GroupVersionKind{})).DeepCopyInto(cm) + return nil + } + return nil + }), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + + cr.SetConditions( + xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionUnknown, + Reason: "FatalError", + Message: "A fatal error occurred before the status of this condition could be determined.", + }, + xpv1.ReconcileError(fmt.Errorf("cannot compose resources: %w", errBoom)), + xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "Available", + Message: "This is a condition for bucket availability.", + ObservedGeneration: 0, + }, + ) + + cr.(*composite.Unstructured).SetClaimConditionTypes( + "DatabaseReady", + "BucketReady", + ) + cr.SetClaimReference(&claim.Reference{}) + })), + }, + opts: []ReconcilerOption{ + WithRecorder(newTestRecorder( + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "SelectComposition", + Message: "Successfully selected composition: ", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Warning("ComposeResources", fmt.Errorf("cannot compose resources: %w", errBoom)), + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + Message: "Pipeline step \"some-function\": This is an event for database availability.", + }, + }, + eventArgs{ + Kind: claimKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + // The claim should not have the "Pipeline step" prefix. + Message: "This is an event for database availability.", + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "SyncSuccess", + // The composite should have the "Pipeline step" prefix. + Message: "Pipeline step \"some-function\": Internal sync was successful.", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "EventNoDetail", + // The composite should not have the prefix as it had an empty + // detail. 
+ Message: "This event should not contain a detail prefix.", + Annotations: map[string]string{}, + }, + }, + )), + WithCompositeFinalizer(resource.NewNopFinalizer()), + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + cr.SetCompositionReference(&corev1.ObjectReference{}) + return nil + })), + WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { + return &v1.CompositionRevision{}, nil + })), + WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { + return nil + })), + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { + return CompositionResult{ + Composed: []ComposedResource{}, + ConnectionDetails: cd, + Events: []TargetedEvent{ + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + Message: "This is an event for database availability.", + }, + Detail: "Pipeline step \"some-function\"", + Target: CompositionTargetCompositeAndClaim, + }, + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "SyncSuccess", + Message: "Internal sync was successful.", + Annotations: map[string]string{}, + }, + Detail: "Pipeline step \"some-function\"", + Target: CompositionTargetComposite, + }, + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "EventNoDetail", + Message: "This event should not contain a detail prefix.", + Annotations: map[string]string{}, + }, + Target: CompositionTargetComposite, + }, + }, + Conditions: []TargetedCondition{ + { + Condition: xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + Target: CompositionTargetComposite, + }, + { + Condition: xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "Available", + Message: "This is a condition for bucket availability.", + ObservedGeneration: 0, + }, + Target: CompositionTargetCompositeAndClaim, + }, + }, + }, errBoom + })), + }, + }, + want: want{ + r: reconcile.Result{Requeue: true}, + }, + }, + "CustomConditionUpdate": { + reason: "Custom conditions should be updated if they already exist. Additionally, if a condition already exists in the status but was not included in the response, it should remain in the status.", + args: args{ + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + if xr, ok := obj.(*composite.Unstructured); ok { + // non-nil claim ref to trigger claim Get() + xr.SetClaimReference(&claim.Reference{}) + // The database condition already exists on the XR. + xr.SetConditions(xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "This is a condition for database availability.", + }) + // The bucket began in a non-ready state. 
+ xr.SetConditions(xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionFalse, + Reason: "Creating", + Message: "Waiting for bucket to be created.", + }) + + xr.SetClaimConditionTypes("DatabaseReady", "BucketReady") + return nil + } + if cm, ok := obj.(*claim.Unstructured); ok { + claim.New(claim.WithGroupVersionKind(schema.GroupVersionKind{})).DeepCopyInto(cm) + return nil + } + return nil + }), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions( + // The database condition should exist even though it was not seen + // during this reconcile. + xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "This is a condition for database availability.", + }, + // The bucket condition should be updated to reflect the latest + // condition which is available. + xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "Available", + Message: "This is a condition for bucket availability.", + ObservedGeneration: 0, + }, + xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + xpv1.ReconcileSuccess(), + xpv1.Available(), + ) + cr.(*composite.Unstructured).SetClaimConditionTypes( + // The database claim condition should exist even though it was + // not seen during this reconcile. + "DatabaseReady", + "BucketReady", + ) + cr.SetClaimReference(&claim.Reference{}) + })), + }, + opts: []ReconcilerOption{ + WithRecorder(newTestRecorder( + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "SelectComposition", + Message: "Successfully selected composition: ", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "ComposeResources", + Message: "Successfully composed resources", + Annotations: map[string]string{}, + }, + }, + )), + WithCompositeFinalizer(resource.NewNopFinalizer()), + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + cr.SetCompositionReference(&corev1.ObjectReference{}) + return nil + })), + WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { + return &v1.CompositionRevision{}, nil + })), + WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { + return nil + })), + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { + return CompositionResult{ + Composed: []ComposedResource{}, + ConnectionDetails: cd, + Events: []TargetedEvent{}, + Conditions: []TargetedCondition{ + // The database condition is not added to the XR again. 
+ { + Condition: xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + Target: CompositionTargetComposite, + }, + // The bucket is now ready. + { + Condition: xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "Available", + Message: "This is a condition for bucket availability.", + ObservedGeneration: 0, + }, + Target: CompositionTargetCompositeAndClaim, + }, + }, + }, nil + })), + }, + }, + want: want{ + r: reconcile.Result{RequeueAfter: defaultPollInterval}, + }, + }, + "CustomEventsFailToGetClaim": { + reason: "We should emit custom events that were returned by the composer. If we cannot get the claim, we should just emit events for the composite and continue as normal.", + args: args{ + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + if xr, ok := obj.(*composite.Unstructured); ok { + // non-nil claim ref to trigger claim Get() + xr.SetClaimReference(&claim.Reference{}) + return nil + } + if _, ok := obj.(*claim.Unstructured); ok { + // something went wrong when getting the claim + return errBoom + } + return nil + }), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + cr.SetClaimReference(&claim.Reference{}) + })), + }, + opts: []ReconcilerOption{ + WithRecorder(newTestRecorder( + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "SelectComposition", + Message: "Successfully selected composition: ", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + Message: "Pipeline step \"some-function\": This is an event for database availability.", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "ComposeResources", + Message: "Successfully composed resources", + Annotations: map[string]string{}, + }, + }, + )), + WithCompositeFinalizer(resource.NewNopFinalizer()), + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + cr.SetCompositionReference(&corev1.ObjectReference{}) + return nil + })), + WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { + return &v1.CompositionRevision{}, nil + })), + WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { + return nil + })), + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { + return CompositionResult{ + Composed: []ComposedResource{}, + ConnectionDetails: cd, + Events: []TargetedEvent{ + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + Message: "This is an event for database availability.", + Annotations: map[string]string{}, + }, + Detail: "Pipeline step \"some-function\"", + Target: 
CompositionTargetCompositeAndClaim, + }, + }, + }, nil + })), + }, + }, + want: want{ + r: reconcile.Result{RequeueAfter: defaultPollInterval}, + }, + }, } for name, tc := range cases { @@ -727,6 +1317,12 @@ func TestReconcile(t *testing.T) { if diff := cmp.Diff(tc.want.r, got, test.EquateErrors()); diff != "" { t.Errorf("\n%s\nr.Reconcile(...): -want, +got:\n%s", tc.reason, diff) } + + if tr, ok := r.record.(*testRecorder); ok { + if diff := cmp.Diff(tr.Want, tr.Got, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nr.Reconcile(...): -want events, +got events:\n%s", tc.reason, diff) + } + } }) } } @@ -830,3 +1426,41 @@ func TestFilterToXRPatches(t *testing.T) { }) } } + +// Test types. +const ( + compositeKind = "Composite" + claimKind = "Claim" +) + +// testRecorder allows asserting event creation. +type testRecorder struct { + Want []eventArgs + Got []eventArgs +} + +type eventArgs struct { + Kind string + Event event.Event +} + +func (r *testRecorder) Event(obj runtime.Object, e event.Event) { + var kind string + switch obj.(type) { + case *composite.Unstructured: + kind = compositeKind + case *claim.Unstructured: + kind = claimKind + } + r.Got = append(r.Got, eventArgs{Kind: kind, Event: e}) +} + +func (r *testRecorder) WithAnnotations(_ ...string) event.Recorder { + return r +} + +func newTestRecorder(expected ...eventArgs) *testRecorder { + return &testRecorder{ + Want: expected, + } +} diff --git a/internal/controller/pkg/revision/establisher_test.go b/internal/controller/pkg/revision/establisher_test.go index e33485845..d550a896c 100644 --- a/internal/controller/pkg/revision/establisher_test.go +++ b/internal/controller/pkg/revision/establisher_test.go @@ -20,7 +20,6 @@ import ( "context" "testing" - "github.com/aws/smithy-go/ptr" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" admv1 "k8s.io/api/admissionregistration/v1" @@ -32,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -697,7 +697,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { for _, ref := range o.GetOwnerReferences() { if ref.Kind == "ProviderRevision" && ref.UID == "some-unique-uid-2312" { found = true - if ptr.ToBool(ref.Controller) { + if ptr.Deref(ref.Controller, false) { t.Errorf("expected controller to be false, got %t", *ref.Controller) } } diff --git a/internal/xcrd/crd_test.go b/internal/xcrd/crd_test.go index 91323d1fc..60b6dd549 100644 --- a/internal/xcrd/crd_test.go +++ b/internal/xcrd/crd_test.go @@ -466,6 +466,15 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -783,6 +792,15 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -1050,6 +1068,15 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: 
&extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -1333,6 +1360,15 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -1626,6 +1662,15 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -2119,6 +2164,15 @@ func TestForCompositeResourceClaim(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -2405,6 +2459,15 @@ func TestForCompositeResourceClaim(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -2705,6 +2768,15 @@ func TestForCompositeResourceClaimEmptyXrd(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ diff --git a/internal/xcrd/schemas.go b/internal/xcrd/schemas.go index d8fb00917..1d21b5cfd 100644 --- a/internal/xcrd/schemas.go +++ b/internal/xcrd/schemas.go @@ -356,6 +356,15 @@ func CompositeResourceStatusProps() map[string]extv1.JSONSchemaProps { "lastPublishedTime": {Type: "string", Format: "date-time"}, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, } } From 881f76275d55f898a73cc7b21f7567d15b21d6a8 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Tue, 18 Jun 2024 19:45:17 +0300 Subject: [PATCH 303/370] Initial recursive validation logic Signed-off-by: Mehmet Enes --- build | 1 + cmd/crank/beta/validate/validate.go | 17 +++++++--- cmd/crank/beta/validate/validator.go | 48 ++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 4 deletions(-) create mode 160000 build create mode 100644 cmd/crank/beta/validate/validator.go diff --git a/build b/build new file mode 160000 index 000000000..75a9fe3ae --- /dev/null +++ b/build @@ -0,0 +1 @@ +Subproject commit 75a9fe3ae6b6de82c5f7ddc6a267617940f16b83 diff --git a/cmd/crank/beta/validate/validate.go b/cmd/crank/beta/validate/validate.go index 2716d1bf2..f637278bd 100644 --- a/cmd/crank/beta/validate/validate.go +++ b/cmd/crank/beta/validate/validate.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "io" - ext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" @@ -28,6 +27,7 @@ import ( "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" runtimeschema "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" celconfig "k8s.io/apiserver/pkg/apis/cel" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -96,7 +96,6 @@ func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.Cust if err != nil { return errors.Wrap(err, "cannot create schema validators") } - failure, missingSchemas := 0, 0 for i, r := range resources { @@ -112,9 +111,19 @@ func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.Cust } rf := 0 - + re := field.ErrorList{} + for _, c := range crds { + if c.Spec.Names.Kind == r.GetKind() { + rc, err := Validate(*c, *r) + if err != nil { + return errors.Wrap(err, "cannot validate resource") + } + re = append(re, rc...) + break + } + } for _, v := range sv { - re := validation.ValidateCustomResource(nil, r, *v) + re = append(re, validation.ValidateCustomResource(nil, r, *v)...) for _, e := range re { rf++ if _, err := fmt.Fprintf(w, "[x] schema validation error %s, %s : %s\n", r.GroupVersionKind().String(), getResourceName(r), e.Error()); err != nil { diff --git a/cmd/crank/beta/validate/validator.go b/cmd/crank/beta/validate/validator.go new file mode 100644 index 000000000..f88822440 --- /dev/null +++ b/cmd/crank/beta/validate/validator.go @@ -0,0 +1,48 @@ +package validate + +import ( + "fmt" + + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func validateRecursive(fields map[string]interface{}, sp map[string]extv1.JSONSchemaProps, p *field.Path, errs *field.ErrorList, stdFlds map[string]bool) error { + for key, val := range fields { + if stdFlds[key] { + continue + } + s, ok := sp[key] + if ok { + if s.Type == "object" && s.Properties != nil { + cf, _ := val.(map[string]interface{}) + if err := validateRecursive(cf, s.Properties, p.Child(key), errs, stdFlds); err != nil { + return err + } + } + } else { + e := field.InternalError(p.Child(key), fmt.Errorf("unknown field \"%s\"", p.Child(key).String())) + + *errs = append(*errs, e) + } + } + return nil +} + +// Validate validates the given resource against the CRD schema. 
+func Validate(crd extv1.CustomResourceDefinition, mr unstructured.Unstructured) (field.ErrorList, error) { + // Standard Kubernetes fields in all resources that should not be validated against CRD specific schema + stdFlds := map[string]bool{ + "apiVersion": true, + "kind": true, + "metadata": true, + } + + s := crd.Spec.Versions[0].Schema.OpenAPIV3Schema + fields := mr.UnstructuredContent() + errs := field.ErrorList{} + err := validateRecursive(fields, s.Properties, nil, &errs, stdFlds) + + return errs, err +} From ef857dff03ca1574ca87299ba3144019408cabef Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Wed, 19 Jun 2024 15:41:15 +0300 Subject: [PATCH 304/370] use pruning to get unknown fields Signed-off-by: Mehmet Enes --- build | 1 - cmd/crank/beta/validate/unknown_fields.go | 22 +++++++++++ cmd/crank/beta/validate/validate.go | 16 ++------ cmd/crank/beta/validate/validator.go | 48 ----------------------- 4 files changed, 26 insertions(+), 61 deletions(-) delete mode 160000 build create mode 100644 cmd/crank/beta/validate/unknown_fields.go delete mode 100644 cmd/crank/beta/validate/validator.go diff --git a/build b/build deleted file mode 160000 index 75a9fe3ae..000000000 --- a/build +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 75a9fe3ae6b6de82c5f7ddc6a267617940f16b83 diff --git a/cmd/crank/beta/validate/unknown_fields.go b/cmd/crank/beta/validate/unknown_fields.go new file mode 100644 index 000000000..c26360dad --- /dev/null +++ b/cmd/crank/beta/validate/unknown_fields.go @@ -0,0 +1,22 @@ +package validate + +import ( + "fmt" + "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" + "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/pruning" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// validateUnknownFields Validates the resource's unknown fields against the given schema and returns a list of errors. +func validateUnknownFields(mr map[string]interface{}, sch *schema.Structural) field.ErrorList { + opts := schema.UnknownFieldPathOptions{ + TrackUnknownFieldPaths: true, // to get the list of pruned unknown fields + } + errs := field.ErrorList{} + + unkFlds := pruning.PruneWithOptions(mr, sch, true, opts) + for _, f := range unkFlds { + errs = append(errs, field.InternalError(field.NewPath(f), fmt.Errorf("unknown field \"%s\"", f))) + } + return errs +} diff --git a/cmd/crank/beta/validate/validate.go b/cmd/crank/beta/validate/validate.go index f637278bd..7da0f6509 100644 --- a/cmd/crank/beta/validate/validate.go +++ b/cmd/crank/beta/validate/validate.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "io" + ext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" @@ -96,11 +97,13 @@ func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.Cust if err != nil { return errors.Wrap(err, "cannot create schema validators") } + failure, missingSchemas := 0, 0 for i, r := range resources { gvk := r.GetObjectKind().GroupVersionKind() sv, ok := schemaValidators[gvk] + s := structurals[gvk] // if we have a schema validator, we should also have a structural if !ok { missingSchemas++ if _, err := fmt.Fprintf(w, "[!] 
could not find CRD/XRD for: %s\n", r.GroupVersionKind().String()); err != nil { @@ -112,18 +115,9 @@ func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.Cust rf := 0 re := field.ErrorList{} - for _, c := range crds { - if c.Spec.Names.Kind == r.GetKind() { - rc, err := Validate(*c, *r) - if err != nil { - return errors.Wrap(err, "cannot validate resource") - } - re = append(re, rc...) - break - } - } for _, v := range sv { re = append(re, validation.ValidateCustomResource(nil, r, *v)...) + re = append(re, validateUnknownFields(r.UnstructuredContent(), s)...) for _, e := range re { rf++ if _, err := fmt.Fprintf(w, "[x] schema validation error %s, %s : %s\n", r.GroupVersionKind().String(), getResourceName(r), e.Error()); err != nil { @@ -131,8 +125,6 @@ func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.Cust } } - s := structurals[gvk] // if we have a schema validator, we should also have a structural - celValidator := cel.NewValidator(s, true, celconfig.PerCallLimit) re, _ = celValidator.Validate(context.TODO(), nil, s, resources[i].Object, nil, celconfig.PerCallLimit) for _, e := range re { diff --git a/cmd/crank/beta/validate/validator.go b/cmd/crank/beta/validate/validator.go deleted file mode 100644 index f88822440..000000000 --- a/cmd/crank/beta/validate/validator.go +++ /dev/null @@ -1,48 +0,0 @@ -package validate - -import ( - "fmt" - - extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/validation/field" -) - -func validateRecursive(fields map[string]interface{}, sp map[string]extv1.JSONSchemaProps, p *field.Path, errs *field.ErrorList, stdFlds map[string]bool) error { - for key, val := range fields { - if stdFlds[key] { - continue - } - s, ok := sp[key] - if ok { - if s.Type == "object" && s.Properties != nil { - cf, _ := val.(map[string]interface{}) - if err := validateRecursive(cf, s.Properties, p.Child(key), errs, stdFlds); err != nil { - return err - } - } - } else { - e := field.InternalError(p.Child(key), fmt.Errorf("unknown field \"%s\"", p.Child(key).String())) - - *errs = append(*errs, e) - } - } - return nil -} - -// Validate validates the given resource against the CRD schema. 
-func Validate(crd extv1.CustomResourceDefinition, mr unstructured.Unstructured) (field.ErrorList, error) { - // Standard Kubernetes fields in all resources that should not be validated against CRD specific schema - stdFlds := map[string]bool{ - "apiVersion": true, - "kind": true, - "metadata": true, - } - - s := crd.Spec.Versions[0].Schema.OpenAPIV3Schema - fields := mr.UnstructuredContent() - errs := field.ErrorList{} - err := validateRecursive(fields, s.Properties, nil, &errs, stdFlds) - - return errs, err -} From 98c029ab183cb67abdc227e9dd45496690d5d7e5 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Wed, 19 Jun 2024 16:53:55 +0300 Subject: [PATCH 305/370] change error format of unknown field validation Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/unknown_fields.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/crank/beta/validate/unknown_fields.go b/cmd/crank/beta/validate/unknown_fields.go index c26360dad..6f4c2a1fd 100644 --- a/cmd/crank/beta/validate/unknown_fields.go +++ b/cmd/crank/beta/validate/unknown_fields.go @@ -5,6 +5,7 @@ import ( "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/pruning" "k8s.io/apimachinery/pkg/util/validation/field" + "strings" ) // validateUnknownFields Validates the resource's unknown fields against the given schema and returns a list of errors. @@ -16,7 +17,9 @@ func validateUnknownFields(mr map[string]interface{}, sch *schema.Structural) fi unkFlds := pruning.PruneWithOptions(mr, sch, true, opts) for _, f := range unkFlds { - errs = append(errs, field.InternalError(field.NewPath(f), fmt.Errorf("unknown field \"%s\"", f))) + strPath := strings.Split(f, ".") + child := strPath[len(strPath)-1] + errs = append(errs, field.InternalError(field.NewPath(f), fmt.Errorf("unknown field \"%s\"", child))) } return errs } From c369af412fbc35de317bb6b3cea19fa43a2742e2 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Sat, 29 Jun 2024 14:34:36 +0300 Subject: [PATCH 306/370] add tests and styling Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/unknown_fields.go | 9 ++- cmd/crank/beta/validate/validate_test.go | 98 +++++++++++++++++++++++ 2 files changed, 103 insertions(+), 4 deletions(-) diff --git a/cmd/crank/beta/validate/unknown_fields.go b/cmd/crank/beta/validate/unknown_fields.go index 6f4c2a1fd..ac7aac958 100644 --- a/cmd/crank/beta/validate/unknown_fields.go +++ b/cmd/crank/beta/validate/unknown_fields.go @@ -2,10 +2,11 @@ package validate import ( "fmt" + "strings" + "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/pruning" "k8s.io/apimachinery/pkg/util/validation/field" - "strings" ) // validateUnknownFields Validates the resource's unknown fields against the given schema and returns a list of errors. 
@@ -15,11 +16,11 @@ func validateUnknownFields(mr map[string]interface{}, sch *schema.Structural) fi } errs := field.ErrorList{} - unkFlds := pruning.PruneWithOptions(mr, sch, true, opts) - for _, f := range unkFlds { + uf := pruning.PruneWithOptions(mr, sch, true, opts) + for _, f := range uf { strPath := strings.Split(f, ".") child := strPath[len(strPath)-1] - errs = append(errs, field.InternalError(field.NewPath(f), fmt.Errorf("unknown field \"%s\"", child))) + errs = append(errs, field.Invalid(field.NewPath(f), child, fmt.Sprintf("unknown field: \"%s\"", child))) } return errs } diff --git a/cmd/crank/beta/validate/validate_test.go b/cmd/crank/beta/validate/validate_test.go index cb252df13..f65623eb1 100644 --- a/cmd/crank/beta/validate/validate_test.go +++ b/cmd/crank/beta/validate/validate_test.go @@ -18,10 +18,12 @@ package validate import ( "bytes" + "strings" "testing" "github.com/google/go-cmp/cmp" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/utils/ptr" @@ -1338,3 +1340,99 @@ func TestValidateResources(t *testing.T) { }) } } + +func TestValidateUnknownFields(t *testing.T) { + type args struct { + mr map[string]interface{} + sch *schema.Structural + } + type want struct { + errField string + errMsg string + hasError bool + } + cases := map[string]struct { + reason string + args args + want want + }{ + "UnknownFieldPresent": { + reason: "Should detect unknown fields in the resource and return an error", + args: args{ + mr: map[string]interface{}{ + "apiVersion": "test.org/v1alpha1", + "kind": "Test", + "metadata": map[string]interface{}{ + "name": "test-instance", + }, + "spec": map[string]interface{}{ + "replicas": 3, + "unknownField": "should fail", // This field is not defined in the CRD schema + }, + }, + sch: &schema.Structural{ + Properties: map[string]schema.Structural{ + "spec": { + Properties: map[string]schema.Structural{ + "replicas": { + Generic: schema.Generic{Type: "integer"}, + }, + }, + }, + }, + }, + }, + want: want{ + errField: "spec.unknownField", + errMsg: "unknown field: \"unknownField\"", + hasError: true, + }, + }, + "UnknownFieldNotPresent": { + reason: "Should not return an error when no unknown fields are present", + args: args{ + mr: map[string]interface{}{ + "apiVersion": "test.org/v1alpha1", + "kind": "Test", + "metadata": map[string]interface{}{ + "name": "test-instance", + }, + "spec": map[string]interface{}{ + "replicas": 3, // No unknown fields + }, + }, + sch: &schema.Structural{ + Properties: map[string]schema.Structural{ + "spec": { + Properties: map[string]schema.Structural{ + "replicas": { + Generic: schema.Generic{Type: "integer"}, + }, + }, + }, + }, + }, + }, + want: want{ + hasError: false, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + errs := validateUnknownFields(tc.args.mr, tc.args.sch) + if tc.want.hasError { + if len(errs) == 0 { + t.Errorf("%s: expected error but got none", tc.reason) + } else if errs[0].Field != tc.want.errField || !strings.Contains(errs[0].Error(), tc.want.errMsg) { + t.Errorf("%s: expected %s, %s; got %s", tc.reason, tc.want.errField, tc.want.errMsg, errs[0].Error()) + } + } else { + if len(errs) != 0 { + t.Errorf("%s: expected no errors but got %v", tc.reason, errs) + } + } + }) + } +} From 371e997bc9fad13d9a0d840f42e1c8ac30c58886 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Sun, 30 Jun 2024 
19:54:06 +0300 Subject: [PATCH 307/370] add license boilerplate text Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/unknown_fields.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cmd/crank/beta/validate/unknown_fields.go b/cmd/crank/beta/validate/unknown_fields.go index ac7aac958..4f918e033 100644 --- a/cmd/crank/beta/validate/unknown_fields.go +++ b/cmd/crank/beta/validate/unknown_fields.go @@ -1,3 +1,19 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package validate import ( From 6b227e31f19149004d5678a208505a6362413886 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Wed, 3 Jul 2024 20:24:34 +0300 Subject: [PATCH 308/370] update testing approach Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/validate_test.go | 26 ++++++++---------------- 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/cmd/crank/beta/validate/validate_test.go b/cmd/crank/beta/validate/validate_test.go index f65623eb1..7f7ec9f47 100644 --- a/cmd/crank/beta/validate/validate_test.go +++ b/cmd/crank/beta/validate/validate_test.go @@ -18,7 +18,6 @@ package validate import ( "bytes" - "strings" "testing" "github.com/google/go-cmp/cmp" @@ -26,6 +25,7 @@ import ( "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/utils/ptr" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -1347,9 +1347,7 @@ func TestValidateUnknownFields(t *testing.T) { sch *schema.Structural } type want struct { - errField string - errMsg string - hasError bool + errs field.ErrorList } cases := map[string]struct { reason string @@ -1383,9 +1381,9 @@ func TestValidateUnknownFields(t *testing.T) { }, }, want: want{ - errField: "spec.unknownField", - errMsg: "unknown field: \"unknownField\"", - hasError: true, + errs: field.ErrorList{ + field.Invalid(field.NewPath("spec.unknownField"), "unknownField", `unknown field: "unknownField"`), + }, }, }, "UnknownFieldNotPresent": { @@ -1414,7 +1412,7 @@ func TestValidateUnknownFields(t *testing.T) { }, }, want: want{ - hasError: false, + errs: field.ErrorList{}, }, }, } @@ -1422,16 +1420,8 @@ func TestValidateUnknownFields(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { errs := validateUnknownFields(tc.args.mr, tc.args.sch) - if tc.want.hasError { - if len(errs) == 0 { - t.Errorf("%s: expected error but got none", tc.reason) - } else if errs[0].Field != tc.want.errField || !strings.Contains(errs[0].Error(), tc.want.errMsg) { - t.Errorf("%s: expected %s, %s; got %s", tc.reason, tc.want.errField, tc.want.errMsg, errs[0].Error()) - } - } else { - if len(errs) != 0 { - t.Errorf("%s: expected no errors but got %v", tc.reason, errs) - } + if diff := cmp.Diff(tc.want.errs, errs, test.EquateErrors()); diff != "" { + t.Errorf("%s\nvalidateUnknownFields(...): -want errs, +got errs:\n%s", tc.reason, diff) } }) } From 758faeb52671b8e7073f545d1af01accca3d5aba 
Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Wed, 3 Jul 2024 17:35:37 -0700 Subject: [PATCH 309/370] README: add SIG-docs to the list of SIGs Signed-off-by: Jared Watts --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 24f7abb44..ee6292fa7 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,7 @@ find the meetings in the [Community Calendar][community calendar]. - [#sig-composition-functions][sig-composition-functions-slack] - [#sig-deletion-ordering][sig-deletion-ordering-slack] - [#sig-devex][sig-devex-slack] +- [#sig-docs][sig-docs-slack] - [#sig-e2e-testing][sig-e2e-testing-slack] - [#sig-observability][sig-observability-slack] - [#sig-observe-only][sig-observe-only-slack] @@ -128,6 +129,7 @@ Crossplane is under the Apache 2.0 license. [sig-composition-functions-slack]: https://crossplane.slack.com/archives/C031Y29CSAE [sig-deletion-ordering-slack]: https://crossplane.slack.com/archives/C05BP8W5ALW [sig-devex-slack]: https://crossplane.slack.com/archives/C05U1LLM3B2 +[sig-docs-slack]: https://crossplane.slack.com/archives/C02CAQ52DPU [sig-e2e-testing-slack]: https://crossplane.slack.com/archives/C05C8CCTVNV [sig-observability-slack]: https://crossplane.slack.com/archives/C061GNH3LA0 [sig-observe-only-slack]: https://crossplane.slack.com/archives/C04D5988QEA From 0b2969483b2e5ce010a3d11205d751f9382335a9 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Thu, 4 Jul 2024 10:38:58 +0300 Subject: [PATCH 310/370] Add configuration.meta support for deps to beta validate Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/manager.go | 52 +++++-- cmd/crank/beta/validate/manager_test.go | 176 ++++++++++++++++++++++++ 2 files changed, 213 insertions(+), 15 deletions(-) create mode 100644 cmd/crank/beta/validate/manager_test.go diff --git a/cmd/crank/beta/validate/manager.go b/cmd/crank/beta/validate/manager.go index 9d30dc397..ca16ba932 100644 --- a/cmd/crank/beta/validate/manager.go +++ b/cmd/crank/beta/validate/manager.go @@ -50,8 +50,8 @@ type Manager struct { writer io.Writer crds []*extv1.CustomResourceDefinition - deps map[string]bool // One level dependency images - confs map[string]bool // Configuration images + deps map[string]bool // Dependency images + confs map[string]interface{} // Configuration images } // NewManager returns a new Manager. 
@@ -67,7 +67,7 @@ func NewManager(cacheDir string, fs afero.Fs, w io.Writer) *Manager { m.writer = w m.crds = make([]*extv1.CustomResourceDefinition, 0) m.deps = make(map[string]bool) - m.confs = make(map[string]bool) + m.confs = make(map[string]interface{}) return m } @@ -133,6 +133,19 @@ func (m *Manager) PrepExtensions(extensions []*unstructured.Unstructured) error m.confs[image] = true + case schema.GroupKind{Group: "meta.pkg.crossplane.io", Kind: "Configuration"}: + meta, err := e.MarshalJSON() + if err != nil { + return errors.Wrap(err, "cannot marshal configuration to JSON") + } + + cfg := &metav1.Configuration{} + if err := yaml.Unmarshal(meta, cfg); err != nil { + return errors.Wrapf(err, "cannot unmarshal configuration YAML") + } + + m.confs[cfg.Name] = cfg + default: continue } @@ -171,21 +184,30 @@ func (m *Manager) CacheAndLoad(cleanCache bool) error { func (m *Manager) addDependencies() error { for image := range m.confs { - m.deps[image] = true // we need to download the configuration package for the XRDs + cfg := &metav1.Configuration{} - layer, err := m.fetcher.FetchBaseLayer(image) - if err != nil { - return errors.Wrapf(err, "cannot download package %s", image) - } + switch c := m.confs[image].(type) { + case bool: + m.deps[image] = true // we need to download the configuration package for the XRDs - _, meta, err := extractPackageContent(*layer) - if err != nil { - return errors.Wrapf(err, "cannot extract package file and meta") - } + layer, err := m.fetcher.FetchBaseLayer(image) + if err != nil { + return errors.Wrapf(err, "cannot download package %s", image) + } - cfg := &metav1.Configuration{} - if err := yaml.Unmarshal(meta, cfg); err != nil { - return errors.Wrapf(err, "cannot unmarshal configuration YAML") + _, meta, err := extractPackageContent(*layer) + if err != nil { + return errors.Wrapf(err, "cannot extract package file and meta") + } + if err := yaml.Unmarshal(meta, cfg); err != nil { + return errors.Wrapf(err, "cannot unmarshal configuration YAML") + } + + case *metav1.Configuration: + cfg = c // Configuration.meta is used for pulling dependencies + + default: + return errors.New("unknown configuration type") } deps := cfg.Spec.MetaSpec.DependsOn diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go new file mode 100644 index 000000000..81aea7a41 --- /dev/null +++ b/cmd/crank/beta/validate/manager_test.go @@ -0,0 +1,176 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validate + +import ( + "bytes" + "fmt" + "github.com/google/go-containerregistry/pkg/v1/static" + "github.com/google/go-containerregistry/pkg/v1/types" + "testing" + + "github.com/google/go-cmp/cmp" + conregv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/spf13/afero" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/crossplane/crossplane-runtime/pkg/test" +) + +var ( + configDep2Yaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 +kind: Configuration +metadata: + name: config-dep-2 +spec: + dependsOn: + - provider: provider-dep-1 + version: "v1.3.0" + - function: function-dep-1 + version: "v1.3.0" +--- + +`) + + providerYaml = []byte(`apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-dep-1 +spec: + package: provider-dep-1:v1.3.0 +--- + +`) + + funcYaml = []byte(`apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-dep-1 +spec: + package: function-dep-1:v1.3.0 +--- + +`) +) + +func TestConfigurationTypeSupport(t *testing.T) { + fs := afero.NewMemMapFs() + w := &bytes.Buffer{} + + m := NewManager(".crossplane/cache", fs, w) + confpkg := static.NewLayer(configDep2Yaml, types.OCILayer) + pd := static.NewLayer(providerYaml, types.OCILayer) + fd := static.NewLayer(funcYaml, types.OCILayer) + + type args struct { + extensions []*unstructured.Unstructured + fetchMock func(image string) (*conregv1.Layer, error) + } + type want struct { + err error + confs int + deps int + } + cases := map[string]struct { + reason string + args args + want want + }{ + "SuccessfulDependenciesAddition": { + reason: "All dependencies should be successfully added from both Configuration.pkg and Configuration.meta", + args: args{ + extensions: []*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "meta.pkg.crossplane.io/v1alpha1", + "kind": "Configuration", + "metadata": map[string]interface{}{ + "name": "config-meta", + }, + "spec": map[string]interface{}{ + "dependsOn": []map[string]interface{}{ + { + "provider": "provider-dep-1", + "version": "v1.3.0", + }, + }, + }, + }, + }, + { + Object: map[string]interface{}{ + "apiVersion": "pkg.crossplane.io/v1alpha1", + "kind": "Configuration", + "metadata": map[string]interface{}{ + "name": "config-pkg", + }, + "spec": map[string]interface{}{ + "package": "config-dep-2:v1.3.0", + }, + }, + }, + }, fetchMock: func(image string) (*conregv1.Layer, error) { + switch image { + case "config-dep-2:v1.3.0": + return &confpkg, nil + case "provider-dep-1:v1.3.0": + return &pd, nil + case "function-dep-1:v1.3.0": + return &fd, nil + default: + return nil, fmt.Errorf("unknown image: %s", image) + } + }, + }, + want: want{ + err: nil, + confs: 2, + deps: 3, + }, + }, + } + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + m.fetcher = &MockFetcher{tc.args.fetchMock} + err := m.PrepExtensions(tc.args.extensions) + + if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nPrepExtensions(...): -want error, +got error:\n%s", tc.reason, diff) + } + + err = m.addDependencies() + if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\naddDependencies(...): -want error, +got error:\n%s", tc.reason, diff) + } + + if diff := cmp.Diff(tc.want.confs, len(m.confs)); diff != "" { + t.Errorf("\n%s\naddDependencies(...): -want confs, +got confs:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.deps, len(m.deps)); diff != "" { + t.Errorf("\n%s\naddDependencies(...): 
-want deps, +got deps:\n%s", tc.reason, diff) + } + }) + } +} + +type MockFetcher struct { + fetch func(image string) (*conregv1.Layer, error) +} + +func (m *MockFetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { + return m.fetch(image) +} From 566791d8ea8f06570b800990da09305c8f2fdd1d Mon Sep 17 00:00:00 2001 From: Mehmet Enes <94247411+enesonus@users.noreply.github.com> Date: Thu, 11 Jul 2024 11:35:57 +0300 Subject: [PATCH 311/370] Update cmd/crank/beta/validate/manager_test.go Co-authored-by: Ezgi Demirel Signed-off-by: Mehmet Enes <94247411+enesonus@users.noreply.github.com> --- cmd/crank/beta/validate/manager_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 81aea7a41..557c66224 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -19,11 +19,11 @@ package validate import ( "bytes" "fmt" - "github.com/google/go-containerregistry/pkg/v1/static" - "github.com/google/go-containerregistry/pkg/v1/types" "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-containerregistry/pkg/v1/static" + "github.com/google/go-containerregistry/pkg/v1/types" conregv1 "github.com/google/go-containerregistry/pkg/v1" "github.com/spf13/afero" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" From 75380c02b12253f32c75cf96c2952564d7bc1df3 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Fri, 12 Jul 2024 16:14:31 +0300 Subject: [PATCH 312/370] Change m.confs variable type and update tests Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/manager.go | 22 ++--- cmd/crank/beta/validate/manager_test.go | 118 ++++++++++++++++++------ 2 files changed, 99 insertions(+), 41 deletions(-) diff --git a/cmd/crank/beta/validate/manager.go b/cmd/crank/beta/validate/manager.go index ca16ba932..0966cf09f 100644 --- a/cmd/crank/beta/validate/manager.go +++ b/cmd/crank/beta/validate/manager.go @@ -50,8 +50,8 @@ type Manager struct { writer io.Writer crds []*extv1.CustomResourceDefinition - deps map[string]bool // Dependency images - confs map[string]interface{} // Configuration images + deps map[string]bool // Dependency images + confs map[string]*metav1.Configuration // Configuration images } // NewManager returns a new Manager. 
@@ -67,7 +67,7 @@ func NewManager(cacheDir string, fs afero.Fs, w io.Writer) *Manager { m.writer = w m.crds = make([]*extv1.CustomResourceDefinition, 0) m.deps = make(map[string]bool) - m.confs = make(map[string]interface{}) + m.confs = make(map[string]*metav1.Configuration) return m } @@ -131,7 +131,7 @@ func (m *Manager) PrepExtensions(extensions []*unstructured.Unstructured) error return errors.Wrapf(err, "cannot get package image") } - m.confs[image] = true + m.confs[image] = nil case schema.GroupKind{Group: "meta.pkg.crossplane.io", Kind: "Configuration"}: meta, err := e.MarshalJSON() @@ -184,10 +184,9 @@ func (m *Manager) CacheAndLoad(cleanCache bool) error { func (m *Manager) addDependencies() error { for image := range m.confs { - cfg := &metav1.Configuration{} + cfg := m.confs[image] - switch c := m.confs[image].(type) { - case bool: + if cfg == nil { m.deps[image] = true // we need to download the configuration package for the XRDs layer, err := m.fetcher.FetchBaseLayer(image) @@ -199,15 +198,10 @@ func (m *Manager) addDependencies() error { if err != nil { return errors.Wrapf(err, "cannot extract package file and meta") } - if err := yaml.Unmarshal(meta, cfg); err != nil { + if err := yaml.Unmarshal(meta, &cfg); err != nil { return errors.Wrapf(err, "cannot unmarshal configuration YAML") } - - case *metav1.Configuration: - cfg = c // Configuration.meta is used for pulling dependencies - - default: - return errors.New("unknown configuration type") + m.confs[image] = cfg // update the configuration } deps := cfg.Spec.MetaSpec.DependsOn diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 557c66224..20a14072c 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -22,9 +22,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" + conregv1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/static" "github.com/google/go-containerregistry/pkg/v1/types" - conregv1 "github.com/google/go-containerregistry/pkg/v1" "github.com/spf13/afero" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -32,16 +32,14 @@ import ( ) var ( - configDep2Yaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 + configPkg = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 kind: Configuration metadata: - name: config-dep-2 + name: config-pkg spec: dependsOn: - provider: provider-dep-1 version: "v1.3.0" - - function: function-dep-1 - version: "v1.3.0" --- `) @@ -68,14 +66,23 @@ spec: ) func TestConfigurationTypeSupport(t *testing.T) { - fs := afero.NewMemMapFs() - w := &bytes.Buffer{} - - m := NewManager(".crossplane/cache", fs, w) - confpkg := static.NewLayer(configDep2Yaml, types.OCILayer) + confpkg := static.NewLayer(configPkg, types.OCILayer) pd := static.NewLayer(providerYaml, types.OCILayer) fd := static.NewLayer(funcYaml, types.OCILayer) + fetchMockFunc := func(image string) (*conregv1.Layer, error) { + switch image { + case "config-pkg:v1.3.0": + return &confpkg, nil + case "provider-dep-1:v1.3.0": + return &pd, nil + case "function-dep-1:v1.3.0": + return &fd, nil + default: + return nil, fmt.Errorf("unknown image: %s", image) + } + } + type args struct { extensions []*unstructured.Unstructured fetchMock func(image string) (*conregv1.Layer, error) @@ -90,8 +97,37 @@ func TestConfigurationTypeSupport(t *testing.T) { args args want want }{ - "SuccessfulDependenciesAddition": { - reason: "All dependencies should be successfully added from both Configuration.pkg 
and Configuration.meta", + "SuccessfulConfigPkg": { + //config-pkg + //└─►provider-dep-1 + reason: "All dependencies should be successfully added from Configuration.pkg", + args: args{ + extensions: []*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "pkg.crossplane.io/v1alpha1", + "kind": "Configuration", + "metadata": map[string]interface{}{ + "name": "config-pkg", + }, + "spec": map[string]interface{}{ + "package": "config-pkg:v1.3.0", + }, + }, + }, + }, + fetchMock: fetchMockFunc, + }, + want: want{ + err: nil, + confs: 1, // Configuration.pkg from remote + deps: 2, // 1 provider, 1 Configuration.pkg dependency + }, + }, + "SuccessfulConfigMeta": { + //config-meta + //└─►function-dep-1 + reason: "All dependencies should be successfully added from Configuration.meta", args: args{ extensions: []*unstructured.Unstructured{ { @@ -104,7 +140,41 @@ func TestConfigurationTypeSupport(t *testing.T) { "spec": map[string]interface{}{ "dependsOn": []map[string]interface{}{ { - "provider": "provider-dep-1", + "function": "function-dep-1", + "version": "v1.3.0", + }, + }, + }, + }, + }, + }, + fetchMock: fetchMockFunc, + }, + want: want{ + err: nil, + confs: 1, // Configuration.meta + deps: 1, // Not adding Configuration.meta itself to not send it to cacheDependencies() for download + }, + }, + "SuccessfulConfigMetaAndPkg": { + //config-meta + //└─►function-dep-1 + //config-pkg + //└─►provider-dep-1 + reason: "All dependencies should be successfully added from both Configuration.meta and Configuration.pkg", + args: args{ + extensions: []*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "meta.pkg.crossplane.io/v1alpha1", + "kind": "Configuration", + "metadata": map[string]interface{}{ + "name": "config-meta", + }, + "spec": map[string]interface{}{ + "dependsOn": []map[string]interface{}{ + { + "function": "function-dep-1", "version": "v1.3.0", }, }, @@ -119,31 +189,25 @@ func TestConfigurationTypeSupport(t *testing.T) { "name": "config-pkg", }, "spec": map[string]interface{}{ - "package": "config-dep-2:v1.3.0", + "package": "config-pkg:v1.3.0", }, }, }, - }, fetchMock: func(image string) (*conregv1.Layer, error) { - switch image { - case "config-dep-2:v1.3.0": - return &confpkg, nil - case "provider-dep-1:v1.3.0": - return &pd, nil - case "function-dep-1:v1.3.0": - return &fd, nil - default: - return nil, fmt.Errorf("unknown image: %s", image) - } }, + fetchMock: fetchMockFunc, }, want: want{ err: nil, - confs: 2, - deps: 3, + confs: 2, // Configuration.meta and Configuration.pkg + deps: 3, // 1 Configuration.pkg, 1 provider, 1 function }, }, } for name, tc := range cases { + fs := afero.NewMemMapFs() + w := &bytes.Buffer{} + + m := NewManager(".crossplane/cache", fs, w) t.Run(name, func(t *testing.T) { m.fetcher = &MockFetcher{tc.args.fetchMock} err := m.PrepExtensions(tc.args.extensions) From 4959d72a3d3631b5ebebbf416f206e342b962875 Mon Sep 17 00:00:00 2001 From: stellrust Date: Mon, 15 Jul 2024 19:41:36 +0800 Subject: [PATCH 313/370] chore: fix comment for struct field Signed-off-by: stellrust --- internal/controller/apiextensions/composite/connection.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/controller/apiextensions/composite/connection.go b/internal/controller/apiextensions/composite/connection.go index 3eb1cca5a..fb8618e57 100644 --- a/internal/controller/apiextensions/composite/connection.go +++ b/internal/controller/apiextensions/composite/connection.go @@ -255,7 +255,7 @@ type 
ConnectionDetailExtractConfig struct {
 	// connection secret of the composition instance.
 	Name string
 
-	// FromConnectionDetailKey is the key that will be used to fetch the value
+	// FromConnectionSecretKey is the key that will be used to fetch the value
 	// from the given target resource's connection details.
 	FromConnectionSecretKey *string

From d365c201d66f5da22bd3f6a6c219b24e64a8711f Mon Sep 17 00:00:00 2001
From: Cem Mergenci
Date: Wed, 17 Jul 2024 07:45:09 +0300
Subject: [PATCH 314/370] Fix broken links.

Signed-off-by: Cem Mergenci
---
 apis/generate.go | 2 +-
 contributing/README.md | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/apis/generate.go b/apis/generate.go
index 7ca29c9b5..c272ffe2b 100644
--- a/apis/generate.go
+++ b/apis/generate.go
@@ -18,7 +18,7 @@ limitations under the License.
 */
 
 // NOTE(negz): See the below link for details on what is happening here.
-// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
+// https://go.dev/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
 
 // Remove existing manifests
 //go:generate rm -rf ../cluster/crds
diff --git a/contributing/README.md b/contributing/README.md
index ed5fd72ab..ae6eb62b3 100644
--- a/contributing/README.md
+++ b/contributing/README.md
@@ -854,10 +854,10 @@ func TestExample(t *testing.T) {
 [build submodule]: https://github.com/crossplane/build/
 [`kind`]: https://kind.sigs.k8s.io/
 [Crossplane release cycle]: https://docs.crossplane.io/knowledge-base/guides/release-cycle
-[good git commit hygiene]: https://www.futurelearn.com/info/blog/telling-stories-with-your-git-history
+[good git commit hygiene]: https://www.futurelearn.com/info/blog/telling-stories-with-your-git-history?category=using-futurelearn
 [Developer Certificate of Origin]: https://github.com/apps/dco
-[code review comments]: https://github.com/golang/go/wiki/CodeReviewComments
-[test review comments]: https://github.com/golang/go/wiki/TestComments
+[code review comments]: https://go.dev/wiki/CodeReviewComments
+[test review comments]: https://go.dev/wiki/TestComments
 [E2E readme]: ../test/e2e/README.md
 [docs]: https://github.com/crossplane/docs
 [Effective Go]: https://golang.org/doc/effective_go
@@ -871,4 +871,4 @@ func TestExample(t *testing.T) {
 [CODEOWNERS]: ../CODEOWNERS
 [Reviewers]: ../OWNERS.md#reviewers
 [Maintainers]: ../OWNERS.md#maintainers
-[#4514]: https://github.com/crossplane/crossplane/issues/4514
\ No newline at end of file
+[#4514]: https://github.com/crossplane/crossplane/issues/4514

From 6c781b4dd819dc7b683b0a7bc5b2dcfedfb1c174 Mon Sep 17 00:00:00 2001
From: Jiri Tyr
Date: Thu, 9 May 2024 23:38:41 +0100
Subject: [PATCH 315/370] Support dependencies with version constraints in crank validate

Signed-off-by: Jiri Tyr
---
 cmd/crank/beta/validate/image.go | 54 ++++++++++++++++++++++++++++++--
 1 file changed, 52 insertions(+), 2 deletions(-)

diff --git a/cmd/crank/beta/validate/image.go b/cmd/crank/beta/validate/image.go
index a308bb426..793e34218 100644
--- a/cmd/crank/beta/validate/image.go
+++ b/cmd/crank/beta/validate/image.go
@@ -18,8 +18,10 @@ package validate
 
 import (
 	"fmt"
+	"sort"
 	"strings"
 
+	"github.com/Masterminds/semver"
 	"github.com/google/go-containerregistry/pkg/crane"
 	conregv1 "github.com/google/go-containerregistry/pkg/v1"
 	"k8s.io/apimachinery/pkg/util/yaml"
@@ -37,8 +39,56 @@ type Fetcher struct{}
 // FetchBaseLayer fetches the base layer of the image which contains the 'package.yaml' file.
func (f *Fetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { - if strings.Contains(image, "sha") { // Strip the digest before fetching the image - image = strings.Split(image, "@")[0] + if strings.Contains(image, "@") { + // Strip the digest before fetching the image + image = strings.SplitN(image, "@", 2)[0] + } else if strings.Contains(image, ":") { + // Separate the image base and the image tag + parts := strings.SplitN(image, ":", 2) + imageBase := parts[0] + imageTag := parts[1] + + // Check if the tag is a constraint + isConstraint := true + c, err := semver.NewConstraint(imageTag) + if err != nil { + isConstraint = false + } + + if isConstraint { + // Fetch all image tags + tags, err := crane.ListTags(imageBase) + if err != nil { + return nil, errors.Wrapf(err, "cannot fetch tags for the image %s", imageBase) + } + + // Convert tags to semver versions + vs := []*semver.Version{} + for _, r := range tags { + v, err := semver.NewVersion(r) + if err != nil { + // We skip any tags that are not valid semantic versions + continue + } + vs = append(vs, v) + } + + // Sort all versions and find the last version complient with the constraint + sort.Sort(semver.Collection(vs)) + var addVer string + for _, v := range vs { + if c.Check(v) { + addVer = v.Original() + } + } + + if addVer == "" { + return nil, errors.Wrapf(err, "cannot find any tag complient with the constraint %s", imageTag) + } + + // Compose new complete image string if any complient version was found + image = fmt.Sprintf("%s:%s", imageBase, addVer) + } } cBytes, err := crane.Config(image) From e53c771aac83ffec35e16092726ffd9bd65d3219 Mon Sep 17 00:00:00 2001 From: Jiri Tyr Date: Thu, 16 May 2024 16:20:59 +0100 Subject: [PATCH 316/370] Moving functionality into a separate function Signed-off-by: Jiri Tyr --- cmd/crank/beta/validate/image.go | 98 ++++++++++++++++++-------------- 1 file changed, 54 insertions(+), 44 deletions(-) diff --git a/cmd/crank/beta/validate/image.go b/cmd/crank/beta/validate/image.go index 793e34218..b5ffa6a11 100644 --- a/cmd/crank/beta/validate/image.go +++ b/cmd/crank/beta/validate/image.go @@ -43,51 +43,10 @@ func (f *Fetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { // Strip the digest before fetching the image image = strings.SplitN(image, "@", 2)[0] } else if strings.Contains(image, ":") { - // Separate the image base and the image tag - parts := strings.SplitN(image, ":", 2) - imageBase := parts[0] - imageTag := parts[1] - - // Check if the tag is a constraint - isConstraint := true - c, err := semver.NewConstraint(imageTag) + var err error + image, err = findImageTagForVersionConstraint(image) if err != nil { - isConstraint = false - } - - if isConstraint { - // Fetch all image tags - tags, err := crane.ListTags(imageBase) - if err != nil { - return nil, errors.Wrapf(err, "cannot fetch tags for the image %s", imageBase) - } - - // Convert tags to semver versions - vs := []*semver.Version{} - for _, r := range tags { - v, err := semver.NewVersion(r) - if err != nil { - // We skip any tags that are not valid semantic versions - continue - } - vs = append(vs, v) - } - - // Sort all versions and find the last version complient with the constraint - sort.Sort(semver.Collection(vs)) - var addVer string - for _, v := range vs { - if c.Check(v) { - addVer = v.Original() - } - } - - if addVer == "" { - return nil, errors.Wrapf(err, "cannot find any tag complient with the constraint %s", imageTag) - } - - // Compose new complete image string if any complient version was found - 
image = fmt.Sprintf("%s:%s", imageBase, addVer) + return nil, errors.Wrapf(err, "cannot find image tag for version constraint") } } @@ -124,6 +83,57 @@ func (f *Fetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { return &ll, nil } +func findImageTagForVersionConstraint(image string) (string, error) { + // Separate the image base and the image tag + parts := strings.SplitN(image, ":", 2) + imageBase := parts[0] + imageTag := parts[1] + + // Check if the tag is a constraint + isConstraint := true + c, err := semver.NewConstraint(imageTag) + if err != nil { + isConstraint = false + } + + if isConstraint { + // Fetch all image tags + tags, err := crane.ListTags(imageBase) + if err != nil { + return "", errors.Wrapf(err, "cannot fetch tags for the image %s", imageBase) + } + + // Convert tags to semver versions + vs := []*semver.Version{} + for _, r := range tags { + v, err := semver.NewVersion(r) + if err != nil { + // We skip any tags that are not valid semantic versions + continue + } + vs = append(vs, v) + } + + // Sort all versions and find the last version complient with the constraint + sort.Sort(semver.Collection(vs)) + var addVer string + for _, v := range vs { + if c.Check(v) { + addVer = v.Original() + } + } + + if addVer == "" { + return "", errors.Wrapf(err, "cannot find any tag complient with the constraint %s", imageTag) + } + + // Compose new complete image string if any complient version was found + image = fmt.Sprintf("%s:%s", imageBase, addVer) + } + + return image, nil +} + func extractPackageContent(layer conregv1.Layer) ([][]byte, []byte, error) { rc, err := layer.Uncompressed() if err != nil { From ba1351038a1c58eb39affa9cfdf5535bce4aa88e Mon Sep 17 00:00:00 2001 From: Jiri Tyr Date: Thu, 20 Jun 2024 18:52:05 +0100 Subject: [PATCH 317/370] Seach for newest compliant constraint in revese order Signed-off-by: Jiri Tyr --- cmd/crank/beta/validate/image.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/crank/beta/validate/image.go b/cmd/crank/beta/validate/image.go index b5ffa6a11..bb2a35c09 100644 --- a/cmd/crank/beta/validate/image.go +++ b/cmd/crank/beta/validate/image.go @@ -115,11 +115,13 @@ func findImageTagForVersionConstraint(image string) (string, error) { } // Sort all versions and find the last version complient with the constraint - sort.Sort(semver.Collection(vs)) + sort.Sort(sort.Reverse(semver.Collection(vs))) var addVer string for _, v := range vs { if c.Check(v) { addVer = v.Original() + + break } } From 645386d5c7b8a34a78e3ed943ec6c926a96a87c8 Mon Sep 17 00:00:00 2001 From: Jiri Tyr Date: Wed, 3 Jul 2024 03:47:36 +0100 Subject: [PATCH 318/370] Fixing issues and adding test Signed-off-by: Jiri Tyr --- cmd/crank/beta/validate/image.go | 68 +++++++++-------- cmd/crank/beta/validate/image_test.go | 103 ++++++++++++++++++++++++++ 2 files changed, 139 insertions(+), 32 deletions(-) create mode 100644 cmd/crank/beta/validate/image_test.go diff --git a/cmd/crank/beta/validate/image.go b/cmd/crank/beta/validate/image.go index bb2a35c09..8cc0e50e1 100644 --- a/cmd/crank/beta/validate/image.go +++ b/cmd/crank/beta/validate/image.go @@ -85,9 +85,10 @@ func (f *Fetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { func findImageTagForVersionConstraint(image string) (string, error) { // Separate the image base and the image tag - parts := strings.SplitN(image, ":", 2) - imageBase := parts[0] - imageTag := parts[1] + parts := strings.Split(image, ":") + lastPart := len(parts) - 1 + imageBase := 
strings.Join(parts[0:lastPart], ":") + imageTag := parts[lastPart] // Check if the tag is a constraint isConstraint := true @@ -96,43 +97,46 @@ func findImageTagForVersionConstraint(image string) (string, error) { isConstraint = false } - if isConstraint { - // Fetch all image tags - tags, err := crane.ListTags(imageBase) - if err != nil { - return "", errors.Wrapf(err, "cannot fetch tags for the image %s", imageBase) - } - - // Convert tags to semver versions - vs := []*semver.Version{} - for _, r := range tags { - v, err := semver.NewVersion(r) - if err != nil { - // We skip any tags that are not valid semantic versions - continue - } - vs = append(vs, v) - } + // Return original image if no constraint was detected + if !isConstraint { + return image, nil + } - // Sort all versions and find the last version complient with the constraint - sort.Sort(sort.Reverse(semver.Collection(vs))) - var addVer string - for _, v := range vs { - if c.Check(v) { - addVer = v.Original() + // Fetch all image tags + tags, err := crane.ListTags(imageBase) + if err != nil { + return "", errors.Wrapf(err, "cannot fetch tags for the image %s", imageBase) + } - break - } + // Convert tags to semver versions + vs := []*semver.Version{} + for _, r := range tags { + v, err := semver.NewVersion(r) + if err != nil { + // We skip any tags that are not valid semantic versions + continue } + vs = append(vs, v) + } - if addVer == "" { - return "", errors.Wrapf(err, "cannot find any tag complient with the constraint %s", imageTag) + // Sort all versions and find the last version complient with the constraint + sort.Sort(sort.Reverse(semver.Collection(vs))) + var addVer string + for _, v := range vs { + if c.Check(v) { + addVer = v.Original() + + break } + } - // Compose new complete image string if any complient version was found - image = fmt.Sprintf("%s:%s", imageBase, addVer) + if addVer == "" { + return "", errors.Errorf("cannot find any tag complient with the constraint %s", imageTag) } + // Compose new complete image string if any complient version was found + image = fmt.Sprintf("%s:%s", imageBase, addVer) + return image, nil } diff --git a/cmd/crank/beta/validate/image_test.go b/cmd/crank/beta/validate/image_test.go new file mode 100644 index 000000000..e40a91cf5 --- /dev/null +++ b/cmd/crank/beta/validate/image_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validate + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" +) + +func TestFindImageTagForVersionConstraint(t *testing.T) { + repoName := "ubuntu" + responseTags := []byte(`{"tags":["1.2.3","4.5.6"]}`) + cases := map[string]struct { + responseBody []byte + host string + constraint string + expectedImage string + expectError bool + }{ + "NoConstraint": { + responseBody: responseTags, + constraint: "1.2.3", + expectedImage: "ubuntu:1.2.3", + }, + "Constraint": { + responseBody: responseTags, + constraint: ">=1.2.3", + expectedImage: "ubuntu:4.5.6", + }, + "CannotFetchTags": { + responseBody: responseTags, + host: "wrong.host", + constraint: ">=4.5.6", + expectError: true, + }, + "NoMatchingTag": { + responseBody: responseTags, + constraint: ">4.5.6", + expectError: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + tagsPath := fmt.Sprintf("/v2/%s/tags/list", repoName) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/": + w.WriteHeader(http.StatusOK) + case tagsPath: + if r.Method != http.MethodGet { + t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet) + } + + w.Write(tc.responseBody) + default: + t.Fatalf("Unexpected path: %v", r.URL.Path) + } + })) + defer server.Close() + + u, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("url.Parse(%v) = %v", server.URL, err) + } + + host := u.Host + if tc.host != "" { + host = tc.host + } + + image, err := findImageTagForVersionConstraint(fmt.Sprintf("%s/%s:%s", host, repoName, tc.constraint)) + + expectedImage := "" + if !tc.expectError { + expectedImage = fmt.Sprintf("%s/%s", host, tc.expectedImage) + } + + if tc.expectError && err == nil { + t.Errorf("[%s] expected: error\n", name) + } else if expectedImage != image { + t.Errorf("[%s] expected: %s, got: %s\n", name, expectedImage, image) + } + }) + } +} From 3179927019f54e200fc8d67570a5807d7cbb29f5 Mon Sep 17 00:00:00 2001 From: Jiri Tyr Date: Wed, 17 Jul 2024 09:43:05 +0100 Subject: [PATCH 319/370] Adding more test cases Signed-off-by: Jiri Tyr --- cmd/crank/beta/validate/image_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cmd/crank/beta/validate/image_test.go b/cmd/crank/beta/validate/image_test.go index e40a91cf5..406478b0f 100644 --- a/cmd/crank/beta/validate/image_test.go +++ b/cmd/crank/beta/validate/image_test.go @@ -44,6 +44,16 @@ func TestFindImageTagForVersionConstraint(t *testing.T) { constraint: ">=1.2.3", expectedImage: "ubuntu:4.5.6", }, + "ConstraintV": { + responseBody: responseTags, + constraint: ">=v1.2.3", + expectedImage: "ubuntu:4.5.6", + }, + "ConstraintPreRelease": { + responseBody: responseTags, + constraint: ">v4.5.6-rc.0.100.g658deda0.dirty", + expectedImage: "ubuntu:4.5.6", + }, "CannotFetchTags": { responseBody: responseTags, host: "wrong.host", From b7a5400d3367a8ed1f7b04f08909542ec199d95a Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Fri, 19 Jul 2024 22:11:02 +0300 Subject: [PATCH 320/370] change manager_test.go provider and function's apiVersion Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/manager_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 20a14072c..6dd005314 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -44,7 +44,7 @@ spec: `) - providerYaml = []byte(`apiVersion: 
pkg.crossplane.io/v1 + providerYaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1 kind: Provider metadata: name: provider-dep-1 @@ -54,7 +54,7 @@ spec: `) - funcYaml = []byte(`apiVersion: pkg.crossplane.io/v1beta1 + funcYaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1beta1 kind: Function metadata: name: function-dep-1 From 83ce98fb421cbfe68ebd5d4caa0e16aafc48b6a1 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Mon, 22 Jul 2024 16:54:49 +0300 Subject: [PATCH 321/370] edit provider/function meta.pkg Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/manager_test.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 6dd005314..1168547c8 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -32,6 +32,7 @@ import ( ) var ( + // config-pkg:v1.3.0 configPkg = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 kind: Configuration metadata: @@ -44,22 +45,20 @@ spec: `) + // provider-dep-1:v1.3.0 providerYaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1 kind: Provider metadata: name: provider-dep-1 -spec: - package: provider-dep-1:v1.3.0 --- `) + // function-dep-1:v1.3.0 funcYaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1beta1 kind: Function metadata: name: function-dep-1 -spec: - package: function-dep-1:v1.3.0 --- `) From a78817ce5604fca1f58ddba1d919d9c48beb7556 Mon Sep 17 00:00:00 2001 From: Lawrence Aiello Date: Fri, 19 Jul 2024 17:26:36 -0400 Subject: [PATCH 322/370] docs: add Rogo to ADOPTERS.md Signed-off-by: Lawrence Aiello Signed-off-by: aiell0 --- ADOPTERS.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index 0e6c0a91d..328155ff9 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -89,4 +89,5 @@ This list is sorted in the order that organizations were added to it. | [Hyland Software](https://www.hyland.com/) | [@sethfduke](mailto:seth.duke@hyland.com) | Hyland is utilizing Crossplane in production environments to simplify the infrastructure provisioning process for internal development teams, providing a simple means of creating resources, while maintaining control over compliance, security, and best-practices of these resources through a suite of Compositions. | | [Skillsoft](https://www.skillsoft.com/) | [@brandon-powers](https://github.com/brandon-powers) | At Skillsoft, Crossplane automates the provisioning and management of our AWS infrastructure (S3, Athena, and Glue) to support core Apache Kafka services powering our online learning platform, [Percipio](https://www.skillsoft.com/meet-skillsoft-percipio), in production environments. | | [Sopra Steria NO](https://www.soprasteria.no/) | [Eirik Holgernes](mailto:eirik.holgernes@soprasteria.com) | As a consultant agency, [Sopra Steria NO](https://www.soprasteria.no/) is leveraging the benefits of [Crossplane](https://www.crossplane.io/) to create self-service backends to increase speed and agility for the developers and engineers of our customers.
With the power of the compositions and composite resource definitions, the life cycle management of resources in [Kubernetes](https://kubernetes.io/) and deployment using GitOps tools like [Flux](https://fluxcd.io/) or [Argo CD](https://argoproj.github.io/cd/), our customers are taking giant strides into the future! | -| [Zuru Tech Italy](https://zuru.tech/) | [@nello1992](https://github.com/nello1992) | We currently use Crossplane in production environments to deploy workload clusters, with more use cases across the organization to come. | \ No newline at end of file +| [Zuru Tech Italy](https://zuru.tech/) | [@nello1992](https://github.com/nello1992) | We currently use Crossplane in production environments to deploy workload clusters, with more use cases across the organization to come. | +| [Rogo](https://rogodata.com/) | [@aiell0](https://github.com/aiell0) | We use Crossplane to deploy application-specific infrastructure to multiple cloud providers. | From 9d21171ea1133224068c12df0c6a8cc212d598f9 Mon Sep 17 00:00:00 2001 From: aiell0 Date: Mon, 22 Jul 2024 12:59:26 -0400 Subject: [PATCH 323/370] docs: specify we run in production Signed-off-by: aiell0 --- ADOPTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index 328155ff9..c6b042f90 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -90,4 +90,4 @@ This list is sorted in the order that organizations were added to it. | [Skillsoft](https://www.skillsoft.com/) | [@brandon-powers](https://github.com/brandon-powers) | At Skillsoft, Crossplane automates the provisioning and management of our AWS infrastructure (S3, Athena, and Glue) to support core Apache Kafka services powering our online learning platform, [Percipio](https://www.skillsoft.com/meet-skillsoft-percipio), in production environments. | | [Sopra Steria NO](https://www.soprasteria.no/) | [Eirik Holgernes](mailto:eirik.holgernes@soprasteria.com) | As a consultant agency, [Sopra Steria NO](https://www.soprasteria.no/) is leveraging the benefits of [Crossplane](https://www.crossplane.io/) to create self-service backends to increase speed and agility for the developers and engineers of our customers.
With the power of the compositions and composite resource definitions, the life cycle management of resources in [Kubernetes](https://kubernetes.io/) and deployment using GitOps tools like [Flux](https://fluxcd.io/) or [Argo CD](https://argoproj.github.io/cd/), our customers are taking giant strides into the future! | | [Zuru Tech Italy](https://zuru.tech/) | [@nello1992](https://github.com/nello1992) | We currently use Crossplane in production environments to deploy workload clusters, with more use cases across the organization to come. | -| [Rogo](https://rogodata.com/) | [@aiell0](https://github.com/aiell0) | We use Crossplane to deploy application-specific infrastructure to multiple cloud providers. | +| [Rogo](https://rogodata.com/) | [@aiell0](https://github.com/aiell0) | We use Crossplane to deploy application-specific infrastructure to multiple cloud providers in our production environments. | From 959db21684d18c2eb45748dbef5da44e21b188e4 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Mon, 22 Jul 2024 23:38:20 +0300 Subject: [PATCH 324/370] use global cache directory at validate Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/cmd.go | 13 ++----------- cmd/crank/beta/validate/manager.go | 1 - 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/cmd/crank/beta/validate/cmd.go b/cmd/crank/beta/validate/cmd.go index f684ffd57..0f2fa3912 100644 --- a/cmd/crank/beta/validate/cmd.go +++ b/cmd/crank/beta/validate/cmd.go @@ -36,7 +36,7 @@ type Cmd struct { Resources string `arg:"" help:"Resources source which can be a file, directory, or '-' for standard input."` // Flags. Keep them in alphabetical order. - CacheDir string `default:".crossplane/cache" help:"Absolute path to the cache directory where downloaded schemas are stored."` + CacheDir string `default:"~/.crossplane/cache" help:"Absolute path to the cache directory where downloaded schemas are stored."` CleanCache bool `help:"Clean the cache directory before downloading package schemas."` SkipSuccessResults bool `help:"Skip printing success results."` @@ -51,7 +51,7 @@ CRDs, providers, and configurations. The output of the "crossplane beta render" piped to this validate command in order to rapidly validate on the outputs of the composition development experience. If providers or configurations are provided as extensions, they will be downloaded and loaded as CRDs before performing -validation. If the cache directory is not provided, it will default to ".crossplane/cache" in the current workspace. +validation. If the cache directory is not provided, it will default to "~/.crossplane/cache" in the current workspace. Cache directory can be cleaned before downloading schemas by setting the "clean-cache" flag. 
All validation is performed offline locally using the Kubernetes API server's validation library, so it does not require @@ -109,15 +109,6 @@ func (c *Cmd) Run(k *kong.Context, _ logging.Logger) error { return errors.Wrapf(err, "cannot load resources from %q", c.Resources) } - // Update default cache directory to absolute path based on the current working directory - if c.CacheDir == defaultCacheDir { - currentPath, err := os.Getwd() - if err != nil { - return errors.Wrapf(err, "cannot get current path") - } - c.CacheDir = filepath.Join(currentPath, c.CacheDir) - } - if strings.HasPrefix(c.CacheDir, "~/") { homeDir, _ := os.UserHomeDir() c.CacheDir = filepath.Join(homeDir, c.CacheDir[2:]) diff --git a/cmd/crank/beta/validate/manager.go b/cmd/crank/beta/validate/manager.go index 0966cf09f..56898180e 100644 --- a/cmd/crank/beta/validate/manager.go +++ b/cmd/crank/beta/validate/manager.go @@ -35,7 +35,6 @@ import ( ) const ( - defaultCacheDir = ".crossplane/cache" packageFileName = "package.yaml" baseLayerLabel = "base" From 0170de4188e6a7b1d76d309ca7bb3abd60104b6e Mon Sep 17 00:00:00 2001 From: Mehmet Enes <94247411+enesonus@users.noreply.github.com> Date: Wed, 24 Jul 2024 00:48:38 +0300 Subject: [PATCH 325/370] Update cmd/crank/beta/validate/cmd.go Co-authored-by: Jared Watts Signed-off-by: Mehmet Enes <94247411+enesonus@users.noreply.github.com> --- cmd/crank/beta/validate/cmd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crank/beta/validate/cmd.go b/cmd/crank/beta/validate/cmd.go index 0f2fa3912..2a7a372a0 100644 --- a/cmd/crank/beta/validate/cmd.go +++ b/cmd/crank/beta/validate/cmd.go @@ -51,7 +51,7 @@ CRDs, providers, and configurations. The output of the "crossplane beta render" piped to this validate command in order to rapidly validate on the outputs of the composition development experience. If providers or configurations are provided as extensions, they will be downloaded and loaded as CRDs before performing -validation. If the cache directory is not provided, it will default to "~/.crossplane/cache" in the current workspace. +validation. If the cache directory is not provided, it will default to "~/.crossplane/cache". Cache directory can be cleaned before downloading schemas by setting the "clean-cache" flag. 
All validation is performed offline locally using the Kubernetes API server's validation library, so it does not require From f16020febeaba09012b949dd2475c0feb8e8e13d Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Mon, 1 Jul 2024 03:09:55 +0300 Subject: [PATCH 326/370] download/cache full graph of dependencies at beta validate Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/manager.go | 4 + cmd/crank/beta/validate/manager_test.go | 107 ++++++++++++++++++++++++ 2 files changed, 111 insertions(+) diff --git a/cmd/crank/beta/validate/manager.go b/cmd/crank/beta/validate/manager.go index 56898180e..294ad93af 100644 --- a/cmd/crank/beta/validate/manager.go +++ b/cmd/crank/beta/validate/manager.go @@ -216,6 +216,10 @@ func (m *Manager) addDependencies() error { if len(image) > 0 { image = fmt.Sprintf(imageFmt, image, dep.Version) m.deps[image] = true + + if _, ok := m.confs[image]; !ok && dep.Configuration != nil { + m.confs[image] = true + } } } } diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 1168547c8..2976f40ae 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -61,6 +61,45 @@ metadata: name: function-dep-1 --- +`) + + configBase = unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "pkg.crossplane.io/v1alpha1", + "kind": "Configuration", + "metadata": map[string]interface{}{ + "name": "config-base", + }, + "spec": map[string]interface{}{ + "package": "config-dep-1:v1.3.0", + }, + }, + } + + configDep1Yaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 +kind: Configuration +metadata: +name: config-dep-1 +spec: +dependsOn: +- configuration: config-dep-2 + version: "v1.3.0" +--- + +`) + + configDep2Yaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 +kind: Configuration +metadata: +name: config-dep-2 +spec: +dependsOn: +- provider: provider-dep-1 + version: "v1.3.0" +- function: function-dep-1 + version: "v1.3.0" +--- + `) ) @@ -237,3 +276,71 @@ type MockFetcher struct { func (m *MockFetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { return m.fetch(image) } + +func TestAddDependencies(t *testing.T) { + fs := afero.NewMemMapFs() + w := &bytes.Buffer{} + + m := NewManager(".crossplane/cache", fs, w) + m.PrepExtensions([]*unstructured.Unstructured{&configBase}) + + cd1 := static.NewLayer(configDep1Yaml, types.OCILayer) + cd2 := static.NewLayer(configDep2Yaml, types.OCILayer) + pd1 := static.NewLayer(providerYaml, types.OCILayer) + fd1 := static.NewLayer(funcYaml, types.OCILayer) + + type args struct { + fetchMock func(image string) (*conregv1.Layer, error) + } + type want struct { + confs int + deps int + err error + } + cases := map[string]struct { + reason string + args args + want want + }{ + "SuccessfulDependenciesAddition": { + reason: "All dependencies should be successfully fetched and added", + args: args{ + fetchMock: func(image string) (*conregv1.Layer, error) { + switch image { + case "config-dep-1:v1.3.0": + return &cd1, nil + case "config-dep-2:v1.3.0": + return &cd2, nil + case "provider-dep-1:v1.3.0": + return &pd1, nil + case "function-dep-1:v1.3.0": + return &fd1, nil + default: + return nil, fmt.Errorf("unknown image: %s", image) + } + }, + }, + want: want{ + confs: 2, + deps: 4, + err: nil, + }, + }, + } + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + m.fetcher = &MockFetcher{tc.args.fetchMock} + err := m.addDependencies() + + if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != 
"" { + t.Errorf("\n%s\naddDependencies(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.confs, len(m.confs)); diff != "" { + t.Errorf("\n%s\naddDependencies(...): -want confs, +got confs:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.deps, len(m.deps)); diff != "" { + t.Errorf("\n%s\naddDependencies(...): -want deps, +got deps:\n%s", tc.reason, diff) + } + }) + } +} From aaad84235eb76b6004d9f517b786ddd0b81dc397 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Sat, 13 Jul 2024 12:03:17 +0300 Subject: [PATCH 327/370] recursively add dependencies Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/manager.go | 15 +++++++++++---- cmd/crank/beta/validate/manager_test.go | 2 +- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/cmd/crank/beta/validate/manager.go b/cmd/crank/beta/validate/manager.go index 294ad93af..a5ac2207f 100644 --- a/cmd/crank/beta/validate/manager.go +++ b/cmd/crank/beta/validate/manager.go @@ -165,7 +165,7 @@ func (m *Manager) CacheAndLoad(cleanCache bool) error { return errors.Wrapf(err, "cannot initialize cache directory") } - if err := m.addDependencies(); err != nil { + if err := m.addDependencies(m.confs); err != nil { return errors.Wrapf(err, "cannot add package dependencies") } @@ -181,8 +181,14 @@ func (m *Manager) CacheAndLoad(cleanCache bool) error { return m.PrepExtensions(schemas) } -func (m *Manager) addDependencies() error { - for image := range m.confs { +// TODO(enesonus): update function to be inline with https://github.com/crossplane/crossplane/pull/5815 confs type +func (m *Manager) addDependencies(confs map[string]bool) error { + if len(confs) == 0 { + return nil + } + + deepConfs := make(map[string]bool) + for image := range confs { cfg := m.confs[image] if cfg == nil { @@ -218,13 +224,14 @@ func (m *Manager) addDependencies() error { m.deps[image] = true if _, ok := m.confs[image]; !ok && dep.Configuration != nil { + deepConfs[image] = true m.confs[image] = true } } } } - return nil + return m.addDependencies(deepConfs) } func (m *Manager) cacheDependencies() error { diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 2976f40ae..5e9c7884e 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -330,7 +330,7 @@ func TestAddDependencies(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { m.fetcher = &MockFetcher{tc.args.fetchMock} - err := m.addDependencies() + err := m.addDependencies(m.confs) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("\n%s\naddDependencies(...): -want error, +got error:\n%s", tc.reason, diff) From 7634a6e3c5123dc620ab77f94b66cb2d7765fa80 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Wed, 24 Jul 2024 13:54:23 +0300 Subject: [PATCH 328/370] update m.confs var type and tests Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/manager.go | 9 +- cmd/crank/beta/validate/manager_test.go | 131 +++++++++++++----------- 2 files changed, 75 insertions(+), 65 deletions(-) diff --git a/cmd/crank/beta/validate/manager.go b/cmd/crank/beta/validate/manager.go index a5ac2207f..5dae3ce7a 100644 --- a/cmd/crank/beta/validate/manager.go +++ b/cmd/crank/beta/validate/manager.go @@ -181,13 +181,12 @@ func (m *Manager) CacheAndLoad(cleanCache bool) error { return m.PrepExtensions(schemas) } -// TODO(enesonus): update function to be inline with https://github.com/crossplane/crossplane/pull/5815 confs type -func (m *Manager) 
addDependencies(confs map[string]bool) error { +func (m *Manager) addDependencies(confs map[string]*metav1.Configuration) error { if len(confs) == 0 { return nil } - deepConfs := make(map[string]bool) + deepConfs := make(map[string]*metav1.Configuration) for image := range confs { cfg := m.confs[image] @@ -224,8 +223,8 @@ func (m *Manager) addDependencies(confs map[string]bool) error { m.deps[image] = true if _, ok := m.confs[image]; !ok && dep.Configuration != nil { - deepConfs[image] = true - m.confs[image] = true + deepConfs[image] = nil + m.confs[image] = nil } } } diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 5e9c7884e..175d05249 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -32,7 +32,7 @@ import ( ) var ( - // config-pkg:v1.3.0 + // config-pkg:v1.3.0. configPkg = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 kind: Configuration metadata: @@ -45,7 +45,7 @@ spec: `) - // provider-dep-1:v1.3.0 + // provider-dep-1:v1.3.0. providerYaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1 kind: Provider metadata: @@ -54,7 +54,7 @@ metadata: `) - // function-dep-1:v1.3.0 + // function-dep-1:v1.3.0. funcYaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1beta1 kind: Function metadata: @@ -63,41 +63,30 @@ metadata: `) - configBase = unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "pkg.crossplane.io/v1alpha1", - "kind": "Configuration", - "metadata": map[string]interface{}{ - "name": "config-base", - }, - "spec": map[string]interface{}{ - "package": "config-dep-1:v1.3.0", - }, - }, - } - + // config-dep-1:v1.3.0. configDep1Yaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 kind: Configuration metadata: -name: config-dep-1 + name: config-dep-1 spec: -dependsOn: -- configuration: config-dep-2 - version: "v1.3.0" + dependsOn: + - configuration: config-dep-2 + version: "v1.3.0" --- `) + // config-dep-2:v1.3.0. 
configDep2Yaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 kind: Configuration metadata: -name: config-dep-2 + name: config-dep-2 spec: -dependsOn: -- provider: provider-dep-1 - version: "v1.3.0" -- function: function-dep-1 - version: "v1.3.0" + dependsOn: + - provider: provider-dep-1 + version: "v1.3.0" + - function: function-dep-1 + version: "v1.3.0" --- `) @@ -136,7 +125,7 @@ func TestConfigurationTypeSupport(t *testing.T) { want want }{ "SuccessfulConfigPkg": { - //config-pkg + // config-pkg //└─►provider-dep-1 reason: "All dependencies should be successfully added from Configuration.pkg", args: args{ @@ -163,7 +152,7 @@ func TestConfigurationTypeSupport(t *testing.T) { }, }, "SuccessfulConfigMeta": { - //config-meta + // config-meta //└─►function-dep-1 reason: "All dependencies should be successfully added from Configuration.meta", args: args{ @@ -195,7 +184,7 @@ func TestConfigurationTypeSupport(t *testing.T) { }, }, "SuccessfulConfigMetaAndPkg": { - //config-meta + // config-meta //└─►function-dep-1 //config-pkg //└─►provider-dep-1 @@ -254,7 +243,7 @@ func TestConfigurationTypeSupport(t *testing.T) { t.Errorf("\n%s\nPrepExtensions(...): -want error, +got error:\n%s", tc.reason, diff) } - err = m.addDependencies() + err = m.addDependencies(m.confs) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("\n%s\naddDependencies(...): -want error, +got error:\n%s", tc.reason, diff) } @@ -269,28 +258,30 @@ func TestConfigurationTypeSupport(t *testing.T) { } } -type MockFetcher struct { - fetch func(image string) (*conregv1.Layer, error) -} - -func (m *MockFetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { - return m.fetch(image) -} - func TestAddDependencies(t *testing.T) { - fs := afero.NewMemMapFs() - w := &bytes.Buffer{} - - m := NewManager(".crossplane/cache", fs, w) - m.PrepExtensions([]*unstructured.Unstructured{&configBase}) - cd1 := static.NewLayer(configDep1Yaml, types.OCILayer) cd2 := static.NewLayer(configDep2Yaml, types.OCILayer) pd1 := static.NewLayer(providerYaml, types.OCILayer) fd1 := static.NewLayer(funcYaml, types.OCILayer) + fetchMockFunc := func(image string) (*conregv1.Layer, error) { + switch image { + case "config-dep-1:v1.3.0": + return &cd1, nil + case "config-dep-2:v1.3.0": + return &cd2, nil + case "provider-dep-1:v1.3.0": + return &pd1, nil + case "function-dep-1:v1.3.0": + return &fd1, nil + default: + return nil, fmt.Errorf("unknown image: %s", image) + } + } + type args struct { - fetchMock func(image string) (*conregv1.Layer, error) + extensions []*unstructured.Unstructured + fetchMock func(image string) (*conregv1.Layer, error) } type want struct { confs int @@ -303,32 +294,44 @@ func TestAddDependencies(t *testing.T) { want want }{ "SuccessfulDependenciesAddition": { + // config-dep-1 + //└─►config-dep-2 + //config-dep-2 + //├─►provider-dep-1 + //└─►function-dep-1 reason: "All dependencies should be successfully fetched and added", args: args{ - fetchMock: func(image string) (*conregv1.Layer, error) { - switch image { - case "config-dep-1:v1.3.0": - return &cd1, nil - case "config-dep-2:v1.3.0": - return &cd2, nil - case "provider-dep-1:v1.3.0": - return &pd1, nil - case "function-dep-1:v1.3.0": - return &fd1, nil - default: - return nil, fmt.Errorf("unknown image: %s", image) - } + fetchMock: fetchMockFunc, + extensions: []*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "pkg.crossplane.io/v1alpha1", + "kind": "Configuration", + "metadata": map[string]interface{}{ + 
"name": "config-dep-1", + }, + "spec": map[string]interface{}{ + "package": "config-dep-1:v1.3.0", + }, + }, + }, }, }, want: want{ - confs: 2, - deps: 4, + confs: 2, // 1 Base Configuration, 1 Child Configuration + deps: 4, // 2 Configurations, 1 provider, 1 function err: nil, }, }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { + fs := afero.NewMemMapFs() + w := &bytes.Buffer{} + + m := NewManager(".crossplane/cache", fs, w) + _ = m.PrepExtensions(tc.args.extensions) + m.fetcher = &MockFetcher{tc.args.fetchMock} err := m.addDependencies(m.confs) @@ -344,3 +347,11 @@ func TestAddDependencies(t *testing.T) { }) } } + +type MockFetcher struct { + fetch func(image string) (*conregv1.Layer, error) +} + +func (m *MockFetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { + return m.fetch(image) +} From 8f551eefe3a176ed21dd2d22290b1765837b1a73 Mon Sep 17 00:00:00 2001 From: Mehmet Enes <94247411+enesonus@users.noreply.github.com> Date: Tue, 30 Jul 2024 17:49:03 +0300 Subject: [PATCH 329/370] Update cmd/crank/beta/validate/manager_test.go Co-authored-by: Jared Watts Signed-off-by: Mehmet Enes <94247411+enesonus@users.noreply.github.com> --- cmd/crank/beta/validate/manager_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 175d05249..998ea3100 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -296,9 +296,8 @@ func TestAddDependencies(t *testing.T) { "SuccessfulDependenciesAddition": { // config-dep-1 //└─►config-dep-2 - //config-dep-2 - //├─►provider-dep-1 - //└─►function-dep-1 + // ├─►provider-dep-1 + // └─►function-dep-1 reason: "All dependencies should be successfully fetched and added", args: args{ fetchMock: fetchMockFunc, From 93dff5dae11213030d69480a687f47d2c28ea6f5 Mon Sep 17 00:00:00 2001 From: Mehmet Enes Date: Tue, 30 Jul 2024 18:15:31 +0300 Subject: [PATCH 330/370] update tests for readability Signed-off-by: Mehmet Enes --- cmd/crank/beta/validate/manager_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 998ea3100..839f065dc 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -126,7 +126,7 @@ func TestConfigurationTypeSupport(t *testing.T) { }{ "SuccessfulConfigPkg": { // config-pkg - //└─►provider-dep-1 + // └─►provider-dep-1 reason: "All dependencies should be successfully added from Configuration.pkg", args: args{ extensions: []*unstructured.Unstructured{ @@ -153,7 +153,7 @@ func TestConfigurationTypeSupport(t *testing.T) { }, "SuccessfulConfigMeta": { // config-meta - //└─►function-dep-1 + // └─►function-dep-1 reason: "All dependencies should be successfully added from Configuration.meta", args: args{ extensions: []*unstructured.Unstructured{ @@ -185,7 +185,7 @@ func TestConfigurationTypeSupport(t *testing.T) { }, "SuccessfulConfigMetaAndPkg": { // config-meta - //└─►function-dep-1 + // └─►function-dep-1 //config-pkg //└─►provider-dep-1 reason: "All dependencies should be successfully added from both Configuration.meta and Configuration.pkg", @@ -234,7 +234,7 @@ func TestConfigurationTypeSupport(t *testing.T) { fs := afero.NewMemMapFs() w := &bytes.Buffer{} - m := NewManager(".crossplane/cache", fs, w) + m := NewManager("", fs, w) t.Run(name, func(t *testing.T) { m.fetcher = &MockFetcher{tc.args.fetchMock} err := 
m.PrepExtensions(tc.args.extensions) @@ -295,7 +295,7 @@ func TestAddDependencies(t *testing.T) { }{ "SuccessfulDependenciesAddition": { // config-dep-1 - //└─►config-dep-2 + // └─►config-dep-2 // ├─►provider-dep-1 // └─►function-dep-1 reason: "All dependencies should be successfully fetched and added", @@ -317,8 +317,8 @@ func TestAddDependencies(t *testing.T) { }, }, want: want{ - confs: 2, // 1 Base Configuration, 1 Child Configuration - deps: 4, // 2 Configurations, 1 provider, 1 function + confs: 2, // 1 Base configuration (config-dep-1), 1 child configuration (config-dep-2) + deps: 4, // 2 configurations (config-dep-1, config-dep-2), 1 provider (provider-dep-1), 1 function (function-dep-1) err: nil, }, }, @@ -328,7 +328,7 @@ func TestAddDependencies(t *testing.T) { fs := afero.NewMemMapFs() w := &bytes.Buffer{} - m := NewManager(".crossplane/cache", fs, w) + m := NewManager("", fs, w) _ = m.PrepExtensions(tc.args.extensions) m.fetcher = &MockFetcher{tc.args.fetchMock} From b902a0eea6addb925449184d0b6f1103b575413d Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Fri, 2 Aug 2024 08:11:50 +0000 Subject: [PATCH 331/370] fix(deps): update module github.com/docker/docker to v25.0.6+incompatible [security] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4a107f8dd..54d37eff4 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Masterminds/semver v1.5.0 github.com/alecthomas/kong v0.9.0 github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240628014613-063a0273907b - github.com/docker/docker v25.0.5+incompatible + github.com/docker/docker v25.0.6+incompatible github.com/docker/go-connections v0.5.0 github.com/emicklei/dot v1.6.2 github.com/go-git/go-billy/v5 v5.5.0 diff --git a/go.sum b/go.sum index f5dd9f816..8996d82aa 100644 --- a/go.sum +++ b/go.sum @@ -150,8 +150,8 @@ github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1x github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg= +github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= From a045274ab7f82cd51a966208d5ba6ebbcc37eecf Mon Sep 17 00:00:00 2001 From: Alper Rifat Ulucinar Date: Tue, 6 Aug 2024 13:06:18 +0300 Subject: [PATCH 332/370] Retry on conflict during CRD dry-run updates in XRD validation webhook Signed-off-by: Alper Rifat Ulucinar --- .../apiextensions/v1/xrd/handler.go | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/internal/validation/apiextensions/v1/xrd/handler.go 
b/internal/validation/apiextensions/v1/xrd/handler.go index 40b5ed9e9..ce58cacec 100644 --- a/internal/validation/apiextensions/v1/xrd/handler.go +++ b/internal/validation/apiextensions/v1/xrd/handler.go @@ -25,6 +25,7 @@ import ( apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -145,16 +146,18 @@ func (v *validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.O } func (v *validator) dryRunUpdateOrCreateIfNotFound(ctx context.Context, crd *apiextv1.CustomResourceDefinition) error { - got := crd.DeepCopy() - err := v.client.Get(ctx, client.ObjectKey{Name: crd.Name}, got) - if err == nil { - got.Spec = crd.Spec - return v.client.Update(ctx, got, client.DryRunAll) - } - if kerrors.IsNotFound(err) { - return v.client.Create(ctx, crd, client.DryRunAll) - } - return err + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + got := crd.DeepCopy() + err := v.client.Get(ctx, client.ObjectKey{Name: crd.Name}, got) + if err == nil { + got.Spec = crd.Spec + return v.client.Update(ctx, got, client.DryRunAll) + } + if kerrors.IsNotFound(err) { + return v.client.Create(ctx, crd, client.DryRunAll) + } + return err + }) } // ValidateDelete always allows delete requests. From f07c1817467793c21227efdb08cb3495737462f6 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Fri, 9 Aug 2024 17:28:37 -0700 Subject: [PATCH 333/370] Automatic linter fix in cmd This happened when I ran `earthly +reviewable`. Not sure how this got into the master branch? Signed-off-by: Nic Cope --- cmd/crank/beta/validate/manager_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 839f065dc..80461809c 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -186,7 +186,7 @@ func TestConfigurationTypeSupport(t *testing.T) { "SuccessfulConfigMetaAndPkg": { // config-meta // └─►function-dep-1 - //config-pkg + // config-pkg //└─►provider-dep-1 reason: "All dependencies should be successfully added from both Configuration.meta and Configuration.pkg", args: args{ From 352b24b2a79bac0382fb095e1cadfa88120cd58f Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Fri, 9 Aug 2024 17:29:06 -0700 Subject: [PATCH 334/370] Delete resources that don't have a controller but appear in resourceRefs Previously if a composed resource appeared in an XR's spec.resourceRefs but didn't have a controller reference the XR would refuse to garbage collect it. The XR would then remove the composed resource from its resource refs, effectively orphaning it. Now if the composed resource has _no_ controller, the XR will delete it. Most likely it was owned by the XR, then had its controller ref stripped (e.g. due to being backed up and restored using a tool like Velero). If the composed resource is controlled by another resource, we'll now return an error rather than silently orphaning it. 
Signed-off-by: Nic Cope --- .../composite/composition_functions.go | 15 ++++++++++++--- .../composite/composition_functions_test.go | 14 ++++++++++++-- .../apiextensions/composite/composition_pt.go | 14 +++++++++++--- .../composite/composition_pt_test.go | 9 ++++++--- 4 files changed, 41 insertions(+), 11 deletions(-) diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index d44210a04..b8fa05beb 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -73,6 +73,7 @@ const ( errFmtUnmarshalPipelineStepInput = "cannot unmarshal input for Composition pipeline step %q" errFmtGetCredentialsFromSecret = "cannot get Composition pipeline step %q credential %q from Secret" errFmtRunPipelineStep = "cannot run Composition pipeline step %q" + errFmtControllerMismatch = "refusing to delete composed resource %q that is controlled by %s %q" errFmtDeleteCD = "cannot delete composed resource %q (a %s named %s)" errFmtUnmarshalDesiredCD = "cannot unmarshal desired composed resource %q from RunFunctionResponse" errFmtCDAsStruct = "cannot encode composed resource %q to protocol buffer Struct well-known type" @@ -864,9 +865,17 @@ func (d *DeletingComposedResourceGarbageCollector) GarbageCollectComposedResourc } for name, cd := range del { - // We want to garbage collect this resource, but we don't control it. - if c := metav1.GetControllerOf(cd.Resource); c == nil || c.UID != owner.GetUID() { - continue + // Don't garbage collect composed resources that someone else controls. + // + // We do garbage collect composed resources that no-one controls. If a + // composed resource appears in observed (i.e. appears in the XR's + // spec.resourceRefs) but doesn't have a controller ref, most likely we + // created it but its controller ref was stripped. In this situation it + // would be permissible for us to adopt the composed resource by setting + // our XR as the controller ref, then delete it. So we may as well just + // go straight to deleting it. + if c := metav1.GetControllerOf(cd.Resource); c != nil && c.UID != owner.GetUID() { + return errors.Errorf(errFmtControllerMismatch, name, c.Kind, c.Name) } if err := d.client.Delete(ctx, cd.Resource); resource.IgnoreNotFound(err) != nil { diff --git a/internal/controller/apiextensions/composite/composition_functions_test.go b/internal/controller/apiextensions/composite/composition_functions_test.go index 40aafeef1..3401ba4f8 100644 --- a/internal/controller/apiextensions/composite/composition_functions_test.go +++ b/internal/controller/apiextensions/composite/composition_functions_test.go @@ -1466,11 +1466,21 @@ func TestGarbageCollectComposedResources(t *testing.T) { }, }, observed: ComposedResourceStates{ - "undesired-resource": ComposedResourceState{Resource: &fake.Composed{}}, + "undesired-resource": ComposedResourceState{Resource: &fake.Composed{ + ObjectMeta: metav1.ObjectMeta{ + // This resource isn't controlled by the XR. 
+ OwnerReferences: []metav1.OwnerReference{{ + Controller: ptr.To(true), + UID: "a-different-xr", + Kind: "XR", + Name: "different", + }}, + }, + }}, }, }, want: want{ - err: nil, + err: errors.New(`refusing to delete composed resource "undesired-resource" that is controlled by XR "different"`), }, }, "DeleteError": { diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index 293952528..394381baa 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -516,9 +516,17 @@ func (a *GarbageCollectingAssociator) AssociateTemplates(ctx context.Context, cr continue } - // We want to garbage collect this resource, but we don't control it. - if c := metav1.GetControllerOf(cd); c == nil || c.UID != cr.GetUID() { - continue + // Don't garbage collect composed resources that someone else controls. + // + // We do garbage collect composed resources that no-one controls. If a + // composed resource appears in observed (i.e. appears in the XR's + // spec.resourceRefs) but doesn't have a controller ref, most likely we + // created it but its controller ref was stripped. In this situation it + // would be permissible for us to adopt the composed resource by setting + // our XR as the controller ref, then delete it. So we may as well just + // go straight to deleting it. + if c := metav1.GetControllerOf(cd); c != nil && c.UID != cr.GetUID() { + return nil, errors.Errorf(errFmtControllerMismatch, name, c.Kind, c.Name) } // This existing resource does not correspond to an extant template. It diff --git a/internal/controller/apiextensions/composite/composition_pt_test.go b/internal/controller/apiextensions/composite/composition_pt_test.go index edc2fb29a..04c6f8440 100644 --- a/internal/controller/apiextensions/composite/composition_pt_test.go +++ b/internal/controller/apiextensions/composite/composition_pt_test.go @@ -649,7 +649,7 @@ func TestGarbageCollectingAssociator(t *testing.T) { }, }, "ResourceControlledBySomeoneElse": { - reason: "We should not garbage colle_ a resource that is controlled by another resource.", + reason: "We should not garbage collect a resource that is controlled by another resource.", c: &test.MockClient{ MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { // The template used to create this resource is no longer known to us. @@ -661,6 +661,8 @@ func TestGarbageCollectingAssociator(t *testing.T) { Controller: &ctrl, BlockOwnerDeletion: &ctrl, UID: types.UID("who-dat"), + Kind: "XR", + Name: "different", }}) return nil }), @@ -673,11 +675,11 @@ func TestGarbageCollectingAssociator(t *testing.T) { ct: []v1.ComposedTemplate{t0}, }, want: want{ - tas: []TemplateAssociation{{Template: t0}}, + err: errors.New(`refusing to delete composed resource "unknown" that is controlled by XR "different"`), }, }, "ResourceNotControlled": { - reason: "We should not garbage colle_ a resource that has no controller reference.", + reason: "We should garbage collect a resource that has no controller reference.", c: &test.MockClient{ MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { // The template used to create this resource is no longer known to us. @@ -686,6 +688,7 @@ func TestGarbageCollectingAssociator(t *testing.T) { // This resource is not controlled by anyone. 
return nil }), + MockDelete: test.NewMockDeleteFn(nil), }, args: args{ cr: &fake.Composite{ From 23b1adaa59f1a66ebc3401f91b45d5c70344a0f9 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Mon, 12 Aug 2024 15:48:47 +0200 Subject: [PATCH 335/370] fix: exit on fatal handling extra resources Signed-off-by: Philippe Scorsolini --- cmd/crank/beta/render/render.go | 10 +++++++++- .../apiextensions/composite/composition_functions.go | 8 ++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/cmd/crank/beta/render/render.go b/cmd/crank/beta/render/render.go index c03e4d581..31b9661f8 100644 --- a/cmd/crank/beta/render/render.go +++ b/cmd/crank/beta/render/render.go @@ -182,6 +182,7 @@ func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) // Used to store the response of the function at the previous iteration. var rsp *fnv1beta1.RunFunctionResponse + extraResourcesLoop: for i := int64(0); i <= composite.MaxRequirementsIterations; i++ { if i == composite.MaxRequirementsIterations { // The requirements didn't stabilize after the maximum number of iterations. @@ -193,10 +194,17 @@ func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) return Outputs{}, errors.Wrapf(err, "cannot run pipeline step %q", fn.Step) } + for _, rs := range rsp.GetResults() { + if rs.GetSeverity() == fnv1beta1.Severity_SEVERITY_FATAL { + // We won't iterate if the function returned a fatal result, we'll handle results after the loop. + break + } + } + newRequirements := rsp.GetRequirements() if reflect.DeepEqual(newRequirements, requirements) { // The requirements are stable, the function is done. - break + break extraResourcesLoop } // Store the requirements for the next iteration. diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index d44210a04..33846f255 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -346,6 +346,7 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur // Used to store the response of the function at the previous iteration. var rsp *v1beta1.RunFunctionResponse + extraResourcesLoop: for i := int64(0); i <= MaxRequirementsIterations; i++ { if i == MaxRequirementsIterations { // The requirements didn't stabilize after the maximum number of iterations. @@ -365,6 +366,13 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur break } + for _, rs := range rsp.GetResults() { + if rs.GetSeverity() == v1beta1.Severity_SEVERITY_FATAL { + // We won't iterate if the function returned a fatal result, we'll handle results after the loop. + break extraResourcesLoop + } + } + newRequirements := rsp.GetRequirements() if reflect.DeepEqual(newRequirements, requirements) { // The requirements stabilized, the function is done. 
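The fix above and the refactor in the next patch converge on the same control flow: run the function, stop immediately on a fatal result, stop once the requirements it returns match the previous iteration's, and otherwise fetch the requested extra resources and run the function again, capped at MaxRequirementsIterations. The following is a minimal, self-contained sketch of that loop for orientation only; runFn and fetchExtra are illustrative stand-ins for the pipeline runner and the extra-resources fetcher, not identifiers from this repository.

package sketch

import (
	"context"
	"errors"
	"reflect"

	"github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1"
)

// runUntilStable is an illustrative version of the loop described above: it
// runs the function, bails out on a fatal result, stops once the returned
// requirements stop changing, and otherwise fetches the requested extra
// resources and tries again. runFn and fetchExtra are assumed stand-ins for
// the real pipeline runner and fetcher; they are not part of this repository.
func runUntilStable(
	ctx context.Context,
	req *v1beta1.RunFunctionRequest,
	runFn func(context.Context, *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error),
	fetchExtra func(context.Context, *v1beta1.ResourceSelector) (*v1beta1.Resources, error),
) (*v1beta1.RunFunctionResponse, error) {
	var prev *v1beta1.Requirements
	for i := 0; i <= 5; i++ { // 5 plays the role of MaxRequirementsIterations.
		rsp, err := runFn(ctx, req)
		if err != nil {
			return nil, err
		}
		// A fatal result ends the loop immediately; iterating further would
		// only mask the failure the function already reported.
		for _, r := range rsp.GetResults() {
			if r.GetSeverity() == v1beta1.Severity_SEVERITY_FATAL {
				return rsp, nil
			}
		}
		// Unchanged requirements mean the function has everything it asked for.
		if reflect.DeepEqual(rsp.GetRequirements(), prev) {
			return rsp, nil
		}
		prev = rsp.GetRequirements()
		// Fetch the newly requested resources, then run the function again.
		req.ExtraResources = map[string]*v1beta1.Resources{}
		for name, sel := range prev.GetExtraResources() {
			rs, err := fetchExtra(ctx, sel)
			if err != nil {
				return nil, err
			}
			req.ExtraResources[name] = rs
		}
		// Carry the function's context forward across iterations.
		req.Context = rsp.GetContext()
	}
	return nil, errors.New("requirements didn't stabilize")
}

The refactor that follows moves exactly this loop out of Compose and into a helper, and the later FunctionRunner change lifts it into a wrapper type, but the shape of the iteration stays the same.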
From d31a117147684cacd2d400b57804b44e237b287e Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Tue, 13 Aug 2024 10:53:17 +0200 Subject: [PATCH 336/370] refactor: avoid using named loops Signed-off-by: Philippe Scorsolini --- .../composite/composition_functions.go | 115 +++++++++--------- 1 file changed, 58 insertions(+), 57 deletions(-) diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index 33846f255..c9cbd9531 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -341,63 +341,9 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur } } - // Used to store the requirements returned at the previous iteration. - var requirements *v1beta1.Requirements - // Used to store the response of the function at the previous iteration. - var rsp *v1beta1.RunFunctionResponse - - extraResourcesLoop: - for i := int64(0); i <= MaxRequirementsIterations; i++ { - if i == MaxRequirementsIterations { - // The requirements didn't stabilize after the maximum number of iterations. - return CompositionResult{}, errors.Errorf(errFmtFunctionMaxIterations, fn.Step, MaxRequirementsIterations) - } - - // TODO(negz): Generate a content-addressable tag for this request. - // Perhaps using https://github.com/cerbos/protoc-gen-go-hashpb ? - rsp, err = c.pipeline.RunFunction(ctx, fn.FunctionRef.Name, req) - if err != nil { - return CompositionResult{}, errors.Wrapf(err, errFmtRunPipelineStep, fn.Step) - } - - if c.composite.ExtraResourcesFetcher == nil { - // If we don't have an extra resources getter, we don't need to - // iterate to satisfy the requirements. - break - } - - for _, rs := range rsp.GetResults() { - if rs.GetSeverity() == v1beta1.Severity_SEVERITY_FATAL { - // We won't iterate if the function returned a fatal result, we'll handle results after the loop. - break extraResourcesLoop - } - } - - newRequirements := rsp.GetRequirements() - if reflect.DeepEqual(newRequirements, requirements) { - // The requirements stabilized, the function is done. - break - } - - // Store the requirements for the next iteration. - requirements = newRequirements - - // Cleanup the extra resources from the previous iteration to store the new ones - req.ExtraResources = make(map[string]*v1beta1.Resources) - - // Fetch the requested resources and add them to the desired state. - for name, selector := range newRequirements.GetExtraResources() { - resources, err := c.composite.ExtraResourcesFetcher.Fetch(ctx, selector) - if err != nil { - return CompositionResult{}, errors.Wrapf(err, "fetching resources for %s", name) - } - - // Resources would be nil in case of not found resources. - req.ExtraResources[name] = resources - } - - // Pass down the updated context across iterations. - req.Context = rsp.GetContext() + rsp, err := c.runFunction(ctx, fn, req) + if err != nil { + return CompositionResult{}, err } // Pass the desired state returned by this Function to the next one. 
@@ -624,6 +570,61 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur return CompositionResult{ConnectionDetails: d.GetComposite().GetConnectionDetails(), Composed: resources, Events: events, Conditions: conditions}, nil } +func (c *FunctionComposer) runFunction(ctx context.Context, fn v1.PipelineStep, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + // Used to store the requirements returned at the previous iteration. + var requirements *v1beta1.Requirements + + for i := int64(0); i <= MaxRequirementsIterations; i++ { + // TODO(negz): Generate a content-addressable tag for this request. + // Perhaps using https://github.com/cerbos/protoc-gen-go-hashpb ? + rsp, err := c.pipeline.RunFunction(ctx, fn.FunctionRef.Name, req) + if err != nil { + return nil, errors.Wrapf(err, errFmtRunPipelineStep, fn.Step) + } + + if c.composite.ExtraResourcesFetcher == nil { + // If we don't have an extra resources getter, we don't need to + // iterate to satisfy the requirements. + return rsp, nil + } + + for _, rs := range rsp.GetResults() { + if rs.GetSeverity() == v1beta1.Severity_SEVERITY_FATAL { + // We won't iterate if the function returned a fatal result, we'll handle results after the loop. + return rsp, nil + } + } + + newRequirements := rsp.GetRequirements() + if reflect.DeepEqual(newRequirements, requirements) { + // The requirements stabilized, the function is done. + return rsp, nil + } + + // Store the requirements for the next iteration. + requirements = newRequirements + + // Cleanup the extra resources from the previous iteration to store the new ones + req.ExtraResources = make(map[string]*v1beta1.Resources) + + // Fetch the requested resources and add them to the desired state. + for name, selector := range newRequirements.GetExtraResources() { + resources, err := c.composite.ExtraResourcesFetcher.Fetch(ctx, selector) + if err != nil { + return nil, errors.Wrapf(err, "fetching resources for %s", name) + } + + // Resources would be nil in case of not found resources. + req.ExtraResources[name] = resources + } + + // Pass down the updated context across iterations. + req.Context = rsp.GetContext() + } + // The requirements didn't stabilize after the maximum number of iterations. + return nil, errors.Errorf(errFmtFunctionMaxIterations, fn.Step, MaxRequirementsIterations) +} + // ComposedFieldOwnerName generates a unique field owner name // for a given Crossplane composite resource (XR). This uniqueness is crucial to // prevent multiple XRs, which compose the same resource, from continuously From a808bf15cfc26d12471deda4ebcc45145382cc3e Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 13 Aug 2024 19:42:15 -0700 Subject: [PATCH 337/370] Add a FunctionRunner that supports extra resources This breaks extra resource handling out of the main function composer loop (and crossplane beta render loop). It's now implemented as a FunctionRunner that wraps the existing FunctionRunner implementation, fetching extra resources as needed. 
Signed-off-by: Nic Cope --- cmd/crank/beta/render/render.go | 167 ++++--- cmd/crank/beta/render/render_test.go | 53 +- cmd/crank/beta/validate/manager_test.go | 2 +- .../composite/composition_functions.go | 139 +----- .../composite/composition_functions_test.go | 433 ---------------- .../composite/extra_resources.go | 159 ++++++ .../composite/extra_resources_test.go | 468 ++++++++++++++++++ .../apiextensions/definition/reconciler.go | 5 +- 8 files changed, 760 insertions(+), 666 deletions(-) create mode 100644 internal/controller/apiextensions/composite/extra_resources.go create mode 100644 internal/controller/apiextensions/composite/extra_resources_test.go diff --git a/cmd/crank/beta/render/render.go b/cmd/crank/beta/render/render.go index 31b9661f8..1564dfd86 100644 --- a/cmd/crank/beta/render/render.go +++ b/cmd/crank/beta/render/render.go @@ -20,8 +20,8 @@ import ( "context" "encoding/json" "fmt" - "reflect" "sort" + "sync" "time" "google.golang.org/grpc" @@ -88,35 +88,91 @@ type Outputs struct { // are in use? } -// Render the desired XR and composed resources, sorted by resource name, given the supplied inputs. -func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) { //nolint:gocognit // TODO(negz): Should we refactor to break this up a bit? - // Run our Functions. +// A RuntimeFunctionRunner is a composite.FunctionRunner that runs functions +// locally, using the runtime configured in their annotations (e.g. Docker). +type RuntimeFunctionRunner struct { + contexts map[string]RuntimeContext + conns map[string]*grpc.ClientConn + mx sync.Mutex +} + +// NewRuntimeFunctionRunner returns a FunctionRunner that runs functions +// locally, using the runtime configured in their annotations (e.g. Docker). It +// starts all the functions and creates gRPC connections when called. +func NewRuntimeFunctionRunner(ctx context.Context, log logging.Logger, fns []pkgv1beta1.Function) (*RuntimeFunctionRunner, error) { + contexts := map[string]RuntimeContext{} conns := map[string]*grpc.ClientConn{} - for _, fn := range in.Functions { + + for _, fn := range fns { runtime, err := GetRuntime(fn, log) if err != nil { - return Outputs{}, errors.Wrapf(err, "cannot get runtime for Function %q", fn.GetName()) + return nil, errors.Wrapf(err, "cannot get runtime for Function %q", fn.GetName()) } rctx, err := runtime.Start(ctx) if err != nil { - return Outputs{}, errors.Wrapf(err, "cannot start Function %q", fn.GetName()) + return nil, errors.Wrapf(err, "cannot start Function %q", fn.GetName()) } - defer func() { - if err := rctx.Stop(ctx); err != nil { - log.Debug("Error stopping function runtime", "function", fn.GetName(), "error", err) - } - }() + contexts[fn.GetName()] = rctx conn, err := grpc.DialContext(ctx, rctx.Target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(waitForReady)) if err != nil { - return Outputs{}, errors.Wrapf(err, "cannot dial Function %q at address %q", fn.GetName(), rctx.Target) + return nil, errors.Wrapf(err, "cannot dial Function %q at address %q", fn.GetName(), rctx.Target) } - defer conn.Close() //nolint:errcheck // This only returns an error if the connection is already closed or closing. conns[fn.GetName()] = conn } + return &RuntimeFunctionRunner{conns: conns}, nil +} + +// RunFunction runs the named function. 
+func (r *RuntimeFunctionRunner) RunFunction(ctx context.Context, name string, req *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) { + r.mx.Lock() + defer r.mx.Unlock() + + conn, ok := r.conns[name] + if !ok { + return nil, errors.Errorf("unknown Function %q - does it exist in your Functions file?", name) + } + + return fnv1beta1.NewFunctionRunnerServiceClient(conn).RunFunction(ctx, req) +} + +// Stop all of the runner's runtimes, and close its gRPC connections. +func (r *RuntimeFunctionRunner) Stop(ctx context.Context) error { + r.mx.Lock() + defer r.mx.Unlock() + + for name, conn := range r.conns { + _ = conn.Close() + delete(r.conns, name) + } + for name, rctx := range r.contexts { + if err := rctx.Stop(ctx); err != nil { + return errors.Wrapf(err, "cannot stop function %q runtime (target %q)", name, rctx.Target) + } + delete(r.contexts, name) + } + + return nil +} + +// Render the desired XR and composed resources, sorted by resource name, given the supplied inputs. +func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) { //nolint:gocognit // TODO(negz): Should we refactor to break this up a bit? + runtimes, err := NewRuntimeFunctionRunner(ctx, log, in.Functions) + if err != nil { + return Outputs{}, errors.Wrap(err, "cannot start function runtimes") + } + + defer func() { + if err := runtimes.Stop(ctx); err != nil { + log.Info("Error stopping function runtimes", "error", err) + } + }() + + runner := composite.NewFetchingFunctionRunner(runtimes, &FilteringFetcher{extra: in.ExtraResources}) + observed := composite.ComposedResourceStates{} for i, cd := range in.ObservedResources { name := cd.GetAnnotations()[AnnotationKeyCompositionResourceName] @@ -159,13 +215,6 @@ func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) // the desired state returned by the last, and each Function may produce // results. for _, fn := range in.Composition.Spec.Pipeline { - conn, ok := conns[fn.FunctionRef.Name] - if !ok { - return Outputs{}, errors.Errorf("unknown Function %q, referenced by pipeline step %q - does it exist in your Functions file?", fn.FunctionRef.Name, fn.Step) - } - - fClient := fnv1beta1.NewFunctionRunnerServiceClient(conn) - // The request to send to the function, will be updated at each iteration if needed. req := &fnv1beta1.RunFunctionRequest{Observed: o, Desired: d, Context: fctx} @@ -177,55 +226,9 @@ func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) req.Input = in } - // Used to store the requirements returned at the previous iteration. - var requirements *fnv1beta1.Requirements - // Used to store the response of the function at the previous iteration. - var rsp *fnv1beta1.RunFunctionResponse - - extraResourcesLoop: - for i := int64(0); i <= composite.MaxRequirementsIterations; i++ { - if i == composite.MaxRequirementsIterations { - // The requirements didn't stabilize after the maximum number of iterations. - return Outputs{}, errors.Errorf("requirements didn't stabilize after the maximum number of iterations (%d)", composite.MaxRequirementsIterations) - } - - rsp, err = fClient.RunFunction(ctx, req) - if err != nil { - return Outputs{}, errors.Wrapf(err, "cannot run pipeline step %q", fn.Step) - } - - for _, rs := range rsp.GetResults() { - if rs.GetSeverity() == fnv1beta1.Severity_SEVERITY_FATAL { - // We won't iterate if the function returned a fatal result, we'll handle results after the loop. 
- break - } - } - - newRequirements := rsp.GetRequirements() - if reflect.DeepEqual(newRequirements, requirements) { - // The requirements are stable, the function is done. - break extraResourcesLoop - } - - // Store the requirements for the next iteration. - requirements = newRequirements - - // Cleanup the extra resources from the previous iteration to store the new ones - req.ExtraResources = make(map[string]*fnv1beta1.Resources) - - // Fetch the requested resources and add them to the desired state. - for name, selector := range newRequirements.GetExtraResources() { - newExtraResources, err := filterExtraResources(in.ExtraResources, selector) - if err != nil { - return Outputs{}, errors.Wrapf(err, "cannot filter extra resources for pipeline step %q", fn.Step) - } - - // Resources would be nil in case of not found resources. - req.ExtraResources[name] = newExtraResources - } - - // Pass down the updated context across iterations. - req.Context = rsp.GetContext() + rsp, err := runner.RunFunction(ctx, fn.FunctionRef.Name, req) + if err != nil { + return Outputs{}, errors.Wrapf(err, "cannot run pipeline step %q", fn.Step) } // Pass the desired state returned by this Function to the next one. @@ -337,19 +340,27 @@ func SetComposedResourceMetadata(cd resource.Object, xr resource.Composite, name return errors.Wrapf(meta.AddControllerReference(cd, or), "cannot set composite resource %q as controller ref of composed resource", xr.GetName()) } -func filterExtraResources(ers []unstructured.Unstructured, selector *fnv1beta1.ResourceSelector) (*fnv1beta1.Resources, error) { - if len(ers) == 0 || selector == nil { +// FilteringFetcher is a composite.ExtraResourcesFetcher that "fetches" any +// supplied resource that matches a resource selector. +type FilteringFetcher struct { + extra []unstructured.Unstructured +} + +// Fetch returns all of the underlying extra resources that match the supplied +// resource selector. 
+func (f *FilteringFetcher) Fetch(_ context.Context, rs *fnv1beta1.ResourceSelector) (*fnv1beta1.Resources, error) { + if len(f.extra) == 0 || rs == nil { return nil, nil } out := &fnv1beta1.Resources{} - for _, er := range ers { - if selector.GetApiVersion() != er.GetAPIVersion() { + for _, er := range f.extra { + if rs.GetApiVersion() != er.GetAPIVersion() { continue } - if selector.GetKind() != er.GetKind() { + if rs.GetKind() != er.GetKind() { continue } - if selector.GetMatchName() == er.GetName() { + if rs.GetMatchName() == er.GetName() { o, err := composite.AsStruct(&er) if err != nil { return nil, errors.Wrapf(err, "cannot marshal extra resource %q", er.GetName()) @@ -357,8 +368,8 @@ func filterExtraResources(ers []unstructured.Unstructured, selector *fnv1beta1.R out.Items = []*fnv1beta1.Resource{{Resource: o}} return out, nil } - if selector.GetMatchLabels() != nil { - if labels.SelectorFromSet(selector.GetMatchLabels().GetLabels()).Matches(labels.Set(er.GetLabels())) { + if rs.GetMatchLabels() != nil { + if labels.SelectorFromSet(rs.GetMatchLabels().GetLabels()).Matches(labels.Set(er.GetLabels())) { o, err := composite.AsStruct(&er) if err != nil { return nil, errors.Wrapf(err, "cannot marshal extra resource %q", er.GetName()) diff --git a/cmd/crank/beta/render/render_test.go b/cmd/crank/beta/render/render_test.go index e6e57592b..1a21df22f 100644 --- a/cmd/crank/beta/render/render_test.go +++ b/cmd/crank/beta/render/render_test.go @@ -34,11 +34,17 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + ucomposite "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" fnv1beta1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" apiextensionsv1 "github.com/crossplane/crossplane/apis/apiextensions/v1" pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" + "github.com/crossplane/crossplane/internal/controller/apiextensions/composite" +) + +var ( + _ composite.FunctionRunner = &RuntimeFunctionRunner{} + _ composite.ExtraResourcesFetcher = &FilteringFetcher{} ) func TestRender(t *testing.T) { @@ -65,7 +71,7 @@ func TestRender(t *testing.T) { "InvalidContextValue": { args: args{ in: Inputs{ - CompositeResource: composite.New(), + CompositeResource: ucomposite.New(), Context: map[string][]byte{ "not-valid-json": []byte(`{`), }, @@ -78,7 +84,7 @@ func TestRender(t *testing.T) { "InvalidInput": { args: args{ in: Inputs{ - CompositeResource: composite.New(), + CompositeResource: ucomposite.New(), Composition: &apiextensionsv1.Composition{ Spec: apiextensionsv1.CompositionSpec{ Pipeline: []apiextensionsv1.PipelineStep{ @@ -115,7 +121,7 @@ func TestRender(t *testing.T) { args: args{ ctx: context.Background(), in: Inputs{ - CompositeResource: composite.New(), + CompositeResource: ucomposite.New(), Composition: &apiextensionsv1.Composition{ Spec: apiextensionsv1.CompositionSpec{ Mode: &pipeline, @@ -137,7 +143,7 @@ func TestRender(t *testing.T) { args: args{ ctx: context.Background(), in: Inputs{ - CompositeResource: composite.New(), + CompositeResource: ucomposite.New(), Composition: &apiextensionsv1.Composition{ Spec: apiextensionsv1.CompositionSpec{ Mode: &pipeline, @@ -181,7 +187,7 @@ func TestRender(t *testing.T) { args: args{ ctx: context.Background(), in: Inputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: &ucomposite.Unstructured{ 
Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -263,7 +269,7 @@ func TestRender(t *testing.T) { }, want: want{ out: Outputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: &ucomposite.Unstructured{ Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -349,7 +355,7 @@ func TestRender(t *testing.T) { args: args{ ctx: context.Background(), in: Inputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: &ucomposite.Unstructured{ Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -433,7 +439,7 @@ func TestRender(t *testing.T) { }, want: want{ out: Outputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: &ucomposite.Unstructured{ Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -518,7 +524,7 @@ func TestRender(t *testing.T) { args: args{ ctx: context.Background(), in: Inputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: &ucomposite.Unstructured{ Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -650,7 +656,7 @@ func TestRender(t *testing.T) { }, want: want{ out: Outputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: &ucomposite.Unstructured{ Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -798,8 +804,11 @@ func (r *MockFunctionRunner) RunFunction(ctx context.Context, req *fnv1beta1.Run } func TestFilterExtraResources(t *testing.T) { + type params struct { + ers []unstructured.Unstructured + } type args struct { - ers []unstructured.Unstructured + ctx context.Context selector *fnv1beta1.ResourceSelector } type want struct { @@ -809,13 +818,16 @@ func TestFilterExtraResources(t *testing.T) { cases := map[string]struct { reason string + params params args args want want }{ "NilResources": { reason: "Should return empty slice if no extra resources are passed", - args: args{ + params: params{ ers: []unstructured.Unstructured{}, + }, + args: args{ selector: &fnv1beta1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Foo", @@ -831,7 +843,7 @@ func TestFilterExtraResources(t *testing.T) { }, "NilSelector": { reason: "Should return empty slice if no selector is passed", - args: args{ + params: params{ ers: []unstructured.Unstructured{ { Object: MustLoadJSON(`{ @@ -843,6 +855,8 @@ func TestFilterExtraResources(t *testing.T) { }`), }, }, + }, + args: args{ selector: nil, }, want: want{ @@ -852,7 +866,7 @@ func TestFilterExtraResources(t *testing.T) { }, "MatchName": { reason: "Should return slice with matching resource for name selector", - args: args{ + params: params{ ers: []unstructured.Unstructured{ { Object: MustLoadJSON(`{ @@ -882,6 +896,8 @@ func TestFilterExtraResources(t *testing.T) { }`), }, }, + }, + args: args{ selector: &fnv1beta1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Bar", @@ -909,7 +925,7 @@ func TestFilterExtraResources(t *testing.T) { }, "MatchLabels": { reason: "Should return slice with matching resources for matching selector", - args: args{ + params: params{ ers: []unstructured.Unstructured{ { Object: MustLoadJSON(`{ @@ -969,6 +985,8 @@ func TestFilterExtraResources(t *testing.T) { }`), }, }, + }, + args: args{ selector: &fnv1beta1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Bar", 
@@ -1016,7 +1034,8 @@ func TestFilterExtraResources(t *testing.T) { } for name, tc := range cases { t.Run(name, func(t *testing.T) { - out, err := filterExtraResources(tc.args.ers, tc.args.selector) + f := &FilteringFetcher{extra: tc.params.ers} + out, err := f.Fetch(tc.args.ctx, tc.args.selector) if diff := cmp.Diff(tc.want.out, out, cmpopts.EquateEmpty(), cmpopts.IgnoreUnexported(fnv1beta1.Resources{}, fnv1beta1.Resource{}, structpb.Struct{}, structpb.Value{})); diff != "" { t.Errorf("%s\nfilterExtraResources(...): -want, +got:\n%s", tc.reason, diff) } diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 839f065dc..80461809c 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -186,7 +186,7 @@ func TestConfigurationTypeSupport(t *testing.T) { "SuccessfulConfigMetaAndPkg": { // config-meta // └─►function-dep-1 - //config-pkg + // config-pkg //└─►provider-dep-1 reason: "All dependencies should be successfully added from both Configuration.meta and Configuration.pkg", args: args{ diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index c9cbd9531..b80bd7ff0 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -19,7 +19,6 @@ import ( "context" "crypto/sha256" "fmt" - "reflect" "sort" "strings" @@ -77,7 +76,6 @@ const ( errFmtUnmarshalDesiredCD = "cannot unmarshal desired composed resource %q from RunFunctionResponse" errFmtCDAsStruct = "cannot encode composed resource %q to protocol buffer Struct well-known type" errFmtFatalResult = "pipeline step %q returned a fatal result: %s" - errFmtFunctionMaxIterations = "step %q requirements didn't stabilize after the maximum number of iterations (%d)" ) // Server-side-apply field owners. We need two of these because it's possible @@ -103,13 +101,6 @@ const ( FunctionContextKeyEnvironment = "apiextensions.crossplane.io/environment" ) -const ( - // MaxRequirementsIterations is the maximum number of times a Function should be called, - // limiting the number of times it can request for extra resources, capped for - // safety. - MaxRequirementsIterations = 5 -) - // A FunctionComposer supports composing resources using a pipeline of // Composition Functions. It ignores the P&T resources array. type FunctionComposer struct { @@ -210,14 +201,6 @@ func WithComposedResourceObserver(g ComposedResourceObserver) FunctionComposerOp } } -// WithExtraResourcesFetcher configures how the FunctionComposer should fetch extra -// resources requested by functions. -func WithExtraResourcesFetcher(f ExtraResourcesFetcher) FunctionComposerOption { - return func(p *FunctionComposer) { - p.composite.ExtraResourcesFetcher = f - } -} - // WithComposedResourceGarbageCollector configures how the FunctionComposer should // garbage collect undesired composed resources. func WithComposedResourceGarbageCollector(d ComposedResourceGarbageCollector) FunctionComposerOption { @@ -341,9 +324,11 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur } } - rsp, err := c.runFunction(ctx, fn, req) + // TODO(negz): Generate a content-addressable tag for this request. + // Perhaps using https://github.com/cerbos/protoc-gen-go-hashpb ? 
+ rsp, err := c.pipeline.RunFunction(ctx, fn.FunctionRef.Name, req) if err != nil { - return CompositionResult{}, err + return CompositionResult{}, errors.Wrapf(err, errFmtRunPipelineStep, fn.Step) } // Pass the desired state returned by this Function to the next one. @@ -570,61 +555,6 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur return CompositionResult{ConnectionDetails: d.GetComposite().GetConnectionDetails(), Composed: resources, Events: events, Conditions: conditions}, nil } -func (c *FunctionComposer) runFunction(ctx context.Context, fn v1.PipelineStep, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { - // Used to store the requirements returned at the previous iteration. - var requirements *v1beta1.Requirements - - for i := int64(0); i <= MaxRequirementsIterations; i++ { - // TODO(negz): Generate a content-addressable tag for this request. - // Perhaps using https://github.com/cerbos/protoc-gen-go-hashpb ? - rsp, err := c.pipeline.RunFunction(ctx, fn.FunctionRef.Name, req) - if err != nil { - return nil, errors.Wrapf(err, errFmtRunPipelineStep, fn.Step) - } - - if c.composite.ExtraResourcesFetcher == nil { - // If we don't have an extra resources getter, we don't need to - // iterate to satisfy the requirements. - return rsp, nil - } - - for _, rs := range rsp.GetResults() { - if rs.GetSeverity() == v1beta1.Severity_SEVERITY_FATAL { - // We won't iterate if the function returned a fatal result, we'll handle results after the loop. - return rsp, nil - } - } - - newRequirements := rsp.GetRequirements() - if reflect.DeepEqual(newRequirements, requirements) { - // The requirements stabilized, the function is done. - return rsp, nil - } - - // Store the requirements for the next iteration. - requirements = newRequirements - - // Cleanup the extra resources from the previous iteration to store the new ones - req.ExtraResources = make(map[string]*v1beta1.Resources) - - // Fetch the requested resources and add them to the desired state. - for name, selector := range newRequirements.GetExtraResources() { - resources, err := c.composite.ExtraResourcesFetcher.Fetch(ctx, selector) - if err != nil { - return nil, errors.Wrapf(err, "fetching resources for %s", name) - } - - // Resources would be nil in case of not found resources. - req.ExtraResources[name] = resources - } - - // Pass down the updated context across iterations. - req.Context = rsp.GetContext() - } - // The requirements didn't stabilize after the maximum number of iterations. - return nil, errors.Errorf(errFmtFunctionMaxIterations, fn.Step, MaxRequirementsIterations) -} - // ComposedFieldOwnerName generates a unique field owner name // for a given Crossplane composite resource (XR). This uniqueness is crucial to // prevent multiple XRs, which compose the same resource, from continuously @@ -648,67 +578,6 @@ func ComposedFieldOwnerName(xr *composite.Unstructured) string { return fmt.Sprintf("%s/%x", FieldOwnerComposedPrefix, h.Sum(nil)) } -// ExistingExtraResourcesFetcher fetches extra resources requested by -// functions using the provided client.Reader. -type ExistingExtraResourcesFetcher struct { - client client.Reader -} - -// NewExistingExtraResourcesFetcher returns a new ExistingExtraResourcesFetcher. -func NewExistingExtraResourcesFetcher(c client.Reader) *ExistingExtraResourcesFetcher { - return &ExistingExtraResourcesFetcher{client: c} -} - -// Fetch fetches resources requested by functions using the provided client.Reader. 
-func (e *ExistingExtraResourcesFetcher) Fetch(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { - if rs == nil { - return nil, errors.New(errNilResourceSelector) - } - switch match := rs.GetMatch().(type) { - case *v1beta1.ResourceSelector_MatchName: - // Fetch a single resource. - r := &kunstructured.Unstructured{} - r.SetAPIVersion(rs.GetApiVersion()) - r.SetKind(rs.GetKind()) - nn := types.NamespacedName{Name: rs.GetMatchName()} - err := e.client.Get(ctx, nn, r) - if kerrors.IsNotFound(err) { - // The resource doesn't exist. We'll return nil, which the Functions - // know means that the resource was not found. - return nil, nil - } - if err != nil { - return nil, errors.Wrap(err, errGetExtraResourceByName) - } - o, err := AsStruct(r) - if err != nil { - return nil, errors.Wrap(err, errExtraResourceAsStruct) - } - return &v1beta1.Resources{Items: []*v1beta1.Resource{{Resource: o}}}, nil - case *v1beta1.ResourceSelector_MatchLabels: - // Fetch a list of resources. - list := &kunstructured.UnstructuredList{} - list.SetAPIVersion(rs.GetApiVersion()) - list.SetKind(rs.GetKind()) - - if err := e.client.List(ctx, list, client.MatchingLabels(match.MatchLabels.GetLabels())); err != nil { - return nil, errors.Wrap(err, errListExtraResources) - } - - resources := make([]*v1beta1.Resource, len(list.Items)) - for i, r := range list.Items { - o, err := AsStruct(&r) - if err != nil { - return nil, errors.Wrap(err, errExtraResourceAsStruct) - } - resources[i] = &v1beta1.Resource{Resource: o} - } - - return &v1beta1.Resources{Items: resources}, nil - } - return nil, errors.New(errUnknownResourceSelector) -} - // An ExistingComposedResourceObserver uses an XR's resource references to load // any existing composed resources from the API server. It also loads their // connection details. diff --git a/internal/controller/apiextensions/composite/composition_functions_test.go b/internal/controller/apiextensions/composite/composition_functions_test.go index 40aafeef1..bbccb1ee1 100644 --- a/internal/controller/apiextensions/composite/composition_functions_test.go +++ b/internal/controller/apiextensions/composite/composition_functions_test.go @@ -28,7 +28,6 @@ import ( corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" @@ -866,235 +865,6 @@ func TestFunctionCompose(t *testing.T) { err: nil, }, }, - "SuccessfulWithExtraResources": { - reason: "We should return a valid CompositionResult when a 'pure Function' (i.e. 
patch-and-transform-less) reconcile succeeds after having requested some extra resource", - params: params{ - kube: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{Resource: "UncoolComposed"}, "")), // all names are available - MockPatch: test.NewMockPatchFn(nil), - MockStatusPatch: test.NewMockSubResourcePatchFn(nil), - }, - r: func() FunctionRunner { - var nrCalls int - return FunctionRunnerFn(func(_ context.Context, _ string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { - defer func() { nrCalls++ }() - requirements := &v1beta1.Requirements{ - ExtraResources: map[string]*v1beta1.ResourceSelector{ - "existing": { - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchName{ - MatchName: "existing", - }, - }, - "missing": { - ApiVersion: "test.crossplane.io/v1", - Kind: "Bar", - Match: &v1beta1.ResourceSelector_MatchName{ - MatchName: "missing", - }, - }, - }, - } - rsp := &v1beta1.RunFunctionResponse{ - Desired: &v1beta1.State{ - Composite: &v1beta1.Resource{ - Resource: MustStruct(map[string]any{ - "status": map[string]any{ - "widgets": 42, - }, - }), - ConnectionDetails: map[string][]byte{"from": []byte("function-pipeline")}, - }, - Resources: map[string]*v1beta1.Resource{ - "observed-resource-a": { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "CoolComposed", - "spec": map[string]any{ - "someKey": req.GetInput().AsMap()["someKey"].(string), - }, - }), - Ready: v1beta1.Ready_READY_TRUE, - }, - "desired-resource-a": { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "CoolComposed", - }), - }, - }, - }, - Results: []*v1beta1.Result{ - { - Severity: v1beta1.Severity_SEVERITY_NORMAL, - Message: "A normal result", - }, - { - Severity: v1beta1.Severity_SEVERITY_WARNING, - Message: "A warning result", - }, - { - Severity: v1beta1.Severity_SEVERITY_UNSPECIFIED, - Message: "A result of unspecified severity", - }, - { - // If the severity is unknown, the target should be force set - // to target only the XR. - Severity: v1beta1.Severity_SEVERITY_UNSPECIFIED, - Message: "A result of unspecified severity targeting the claim should be forced to only target the XR.", - Target: v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), - }, - }, - Requirements: requirements, - } - - if nrCalls > 1 { - t.Fatalf("unexpected number of calls to FunctionRunner.RunFunction, should have been exactly 2: %d", nrCalls+1) - return nil, errBoom - } - - if nrCalls == 1 { - if len(req.GetExtraResources()) != 2 { - t.Fatalf("unexpected number of extra resources: %d", len(requirements.GetExtraResources())) - } - if rs := req.GetExtraResources()["missing"]; rs != nil && len(rs.GetItems()) != 0 { - t.Fatalf("unexpected extra resource, expected none, got: %v", rs) - } - if rs := req.GetExtraResources()["existing"]; rs == nil || len(rs.GetItems()) != 1 { - t.Fatalf("unexpected extra resource, expected one, got: %v", rs) - } - } - - return rsp, nil - }) - }(), - o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { - return nil, nil - })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { - // We only try to extract connection details for - // observed resources. 
- r := ComposedResourceStates{ - "observed-resource-a": ComposedResourceState{ - Resource: &fake.Composed{ - ObjectMeta: metav1.ObjectMeta{Name: "observed-resource-a"}, - }, - }, - } - return r, nil - })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { - return nil - })), - WithExtraResourcesFetcher(ExtraResourcesFetcherFn(func(_ context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { - if rs.GetMatchName() == "existing" { - return &v1beta1.Resources{ - Items: []*v1beta1.Resource{ - { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]any{ - "name": "existing", - }, - "spec": map[string]any{ - "someField": "someValue", - }, - }), - }, - }, - }, nil - } - return nil, nil - })), - }, - }, - args: args{ - xr: func() *composite.Unstructured { - // Our XR needs a GVK to survive round-tripping through a - // protobuf struct (which involves using the Kubernetes-aware - // JSON unmarshaller that requires a GVK). - xr := composite.New(composite.WithGroupVersionKind(schema.GroupVersionKind{ - Group: "test.crossplane.io", - Version: "v1", - Kind: "CoolComposite", - })) - xr.SetLabels(map[string]string{ - xcrd.LabelKeyNamePrefixForComposed: "parent-xr", - }) - return xr - }(), - req: CompositionRequest{ - Revision: &v1.CompositionRevision{ - Spec: v1.CompositionRevisionSpec{ - Pipeline: []v1.PipelineStep{ - { - Step: "run-cool-function", - FunctionRef: v1.FunctionReference{Name: "cool-function"}, - Input: &runtime.RawExtension{Raw: []byte(`{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Input", - "someKey": "someValue" - }`)}, - }, - }, - }, - }, - }, - }, - want: want{ - res: CompositionResult{ - Composed: []ComposedResource{ - {ResourceName: "desired-resource-a", Synced: true}, - {ResourceName: "observed-resource-a", Ready: true, Synced: true}, - }, - ConnectionDetails: managed.ConnectionDetails{ - "from": []byte("function-pipeline"), - }, - Events: []TargetedEvent{ - { - Event: event.Event{ - Type: "Normal", - Reason: "ComposeResources", - Message: "A normal result", - }, - Detail: "Pipeline step \"run-cool-function\"", - Target: CompositionTargetComposite, - }, - { - Event: event.Event{ - Type: "Warning", - Reason: "ComposeResources", - Message: "A warning result", - }, - Detail: "Pipeline step \"run-cool-function\"", - Target: CompositionTargetComposite, - }, - { - Event: event.Event{ - Type: "Warning", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\" returned a result of unknown severity (assuming warning): A result of unspecified severity", - }, - Target: CompositionTargetComposite, - }, - // If the severity is unknown, the target should be force set - // to target only the XR. 
- { - Event: event.Event{ - Type: "Warning", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\" returned a result of unknown severity (assuming warning): A result of unspecified severity targeting the claim should be forced to only target the XR.", - }, - Target: CompositionTargetComposite, - }, - }, - }, - err: nil, - }, - }, } for name, tc := range cases { @@ -1652,206 +1422,3 @@ func TestUpdateResourceRefs(t *testing.T) { }) } } - -func TestExistingExtraResourcesFetcherFetch(t *testing.T) { - errBoom := errors.New("boom") - - type args struct { - rs *v1beta1.ResourceSelector - c client.Reader - } - type want struct { - res *v1beta1.Resources - err error - } - cases := map[string]struct { - reason string - args args - want want - }{ - "SuccessMatchName": { - reason: "We should return a valid Resources when a resource is found by name", - args: args{ - rs: &v1beta1.ResourceSelector{ - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchName{ - MatchName: "cool-resource", - }, - }, - c: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.SetName("cool-resource") - return nil - }), - }, - }, - want: want{ - res: &v1beta1.Resources{ - Items: []*v1beta1.Resource{ - { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]any{ - "name": "cool-resource", - }, - }), - }, - }, - }, - }, - }, - "SuccessMatchLabels": { - reason: "We should return a valid Resources when a resource is found by labels", - args: args{ - rs: &v1beta1.ResourceSelector{ - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchLabels{ - MatchLabels: &v1beta1.MatchLabels{ - Labels: map[string]string{ - "cool": "resource", - }, - }, - }, - }, - c: &test.MockClient{ - MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { - obj.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{ - { - Object: map[string]interface{}{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]interface{}{ - "name": "cool-resource", - "labels": map[string]interface{}{ - "cool": "resource", - }, - }, - }, - }, - { - Object: map[string]interface{}{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]interface{}{ - "name": "cooler-resource", - "labels": map[string]interface{}{ - "cool": "resource", - }, - }, - }, - }, - } - return nil - }), - }, - }, - want: want{ - res: &v1beta1.Resources{ - Items: []*v1beta1.Resource{ - { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]any{ - "name": "cool-resource", - "labels": map[string]any{ - "cool": "resource", - }, - }, - }), - }, - { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]any{ - "name": "cooler-resource", - "labels": map[string]any{ - "cool": "resource", - }, - }, - }), - }, - }, - }, - }, - }, - "NotFoundMatchName": { - reason: "We should return no error when a resource is not found by name", - args: args{ - rs: &v1beta1.ResourceSelector{ - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchName{ - MatchName: "cool-resource", - }, - }, - c: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{Resource: "Foo"}, "cool-resource")), - }, - }, - want: want{ - res: nil, - err: nil, - }, - 
}, - // NOTE(phisco): No NotFound error is returned when listing resources by labels, so there is no NotFoundMatchLabels test case. - "ErrorMatchName": { - reason: "We should return any other error encountered when getting a resource by name", - args: args{ - rs: &v1beta1.ResourceSelector{ - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchName{ - MatchName: "cool-resource", - }, - }, - c: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, - }, - want: want{ - res: nil, - err: errBoom, - }, - }, - "ErrorMatchLabels": { - reason: "We should return any other error encountered when listing resources by labels", - args: args{ - rs: &v1beta1.ResourceSelector{ - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchLabels{ - MatchLabels: &v1beta1.MatchLabels{ - Labels: map[string]string{ - "cool": "resource", - }, - }, - }, - }, - c: &test.MockClient{ - MockList: test.NewMockListFn(errBoom), - }, - }, - want: want{ - res: nil, - err: errBoom, - }, - }, - } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - g := NewExistingExtraResourcesFetcher(tc.args.c) - res, err := g.Fetch(context.Background(), tc.args.rs) - if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { - t.Errorf("\n%s\nGet(...): -want, +got:\n%s", tc.reason, diff) - } - if diff := cmp.Diff(tc.want.res, res, cmpopts.IgnoreUnexported(v1beta1.Resources{}, v1beta1.Resource{}, structpb.Struct{}, structpb.Value{})); diff != "" { - t.Errorf("\n%s\nGet(...): -want, +got:\n%s", tc.reason, diff) - } - }) - } -} diff --git a/internal/controller/apiextensions/composite/extra_resources.go b/internal/controller/apiextensions/composite/extra_resources.go new file mode 100644 index 000000000..358d83b80 --- /dev/null +++ b/internal/controller/apiextensions/composite/extra_resources.go @@ -0,0 +1,159 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +*/ + +package composite + +import ( + "context" + "reflect" + + kerrors "k8s.io/apimachinery/pkg/api/errors" + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + + "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" +) + +// MaxRequirementsIterations is the maximum number of times a Function should be +// called, limiting the number of times it can request for extra resources, +// capped for safety. +const MaxRequirementsIterations = 5 + +// A FetchingFunctionRunner wraps an underlying FunctionRunner, adding support +// for fetching any extra resources requested by the function it runs. +type FetchingFunctionRunner struct { + wrapped FunctionRunner + resources ExtraResourcesFetcher +} + +// NewFetchingFunctionRunner returns a FunctionRunner that supports fetching +// extra resources. 
+func NewFetchingFunctionRunner(r FunctionRunner, f ExtraResourcesFetcher) *FetchingFunctionRunner { + return &FetchingFunctionRunner{wrapped: r, resources: f} +} + +// RunFunction runs a function, repeatedly fetching any extra resources it asks +// for. The function may be run up to MaxRequirementsIterations times. +func (c *FetchingFunctionRunner) RunFunction(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + // Used to store the requirements returned at the previous iteration. + var requirements *v1beta1.Requirements + + for i := int64(0); i <= MaxRequirementsIterations; i++ { + rsp, err := c.wrapped.RunFunction(ctx, name, req) + if err != nil { + // I can't think of any useful info to wrap this error with. + return nil, err + } + + for _, rs := range rsp.GetResults() { + if rs.GetSeverity() == v1beta1.Severity_SEVERITY_FATAL { + // We won't iterate if the function returned a fatal result. + return rsp, nil + } + } + + newRequirements := rsp.GetRequirements() + if reflect.DeepEqual(newRequirements, requirements) { + // The requirements stabilized, the function is done. + return rsp, nil + } + + // Store the requirements for the next iteration. + requirements = newRequirements + + // Cleanup the extra resources from the previous iteration to store the new ones + req.ExtraResources = make(map[string]*v1beta1.Resources) + + // Fetch the requested resources and add them to the desired state. + for name, selector := range newRequirements.GetExtraResources() { + resources, err := c.resources.Fetch(ctx, selector) + if err != nil { + return nil, errors.Wrapf(err, "fetching resources for %s", name) + } + + // Resources would be nil in case of not found resources. + req.ExtraResources[name] = resources + } + + // Pass down the updated context across iterations. + req.Context = rsp.GetContext() + } + // The requirements didn't stabilize after the maximum number of iterations. + return nil, errors.Errorf("requirements didn't stabilize after the maximum number of iterations (%d)", MaxRequirementsIterations) +} + +// ExistingExtraResourcesFetcher fetches extra resources requested by +// functions using the provided client.Reader. +type ExistingExtraResourcesFetcher struct { + client client.Reader +} + +// NewExistingExtraResourcesFetcher returns a new ExistingExtraResourcesFetcher. +func NewExistingExtraResourcesFetcher(c client.Reader) *ExistingExtraResourcesFetcher { + return &ExistingExtraResourcesFetcher{client: c} +} + +// Fetch fetches resources requested by functions using the provided client.Reader. +func (e *ExistingExtraResourcesFetcher) Fetch(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { + if rs == nil { + return nil, errors.New(errNilResourceSelector) + } + switch match := rs.GetMatch().(type) { + case *v1beta1.ResourceSelector_MatchName: + // Fetch a single resource. + r := &kunstructured.Unstructured{} + r.SetAPIVersion(rs.GetApiVersion()) + r.SetKind(rs.GetKind()) + nn := types.NamespacedName{Name: rs.GetMatchName()} + err := e.client.Get(ctx, nn, r) + if kerrors.IsNotFound(err) { + // The resource doesn't exist. We'll return nil, which the Functions + // know means that the resource was not found. 
+ return nil, nil + } + if err != nil { + return nil, errors.Wrap(err, errGetExtraResourceByName) + } + o, err := AsStruct(r) + if err != nil { + return nil, errors.Wrap(err, errExtraResourceAsStruct) + } + return &v1beta1.Resources{Items: []*v1beta1.Resource{{Resource: o}}}, nil + case *v1beta1.ResourceSelector_MatchLabels: + // Fetch a list of resources. + list := &kunstructured.UnstructuredList{} + list.SetAPIVersion(rs.GetApiVersion()) + list.SetKind(rs.GetKind()) + + if err := e.client.List(ctx, list, client.MatchingLabels(match.MatchLabels.GetLabels())); err != nil { + return nil, errors.Wrap(err, errListExtraResources) + } + + resources := make([]*v1beta1.Resource, len(list.Items)) + for i, r := range list.Items { + o, err := AsStruct(&r) + if err != nil { + return nil, errors.Wrap(err, errExtraResourceAsStruct) + } + resources[i] = &v1beta1.Resource{Resource: o} + } + + return &v1beta1.Resources{Items: resources}, nil + } + return nil, errors.New(errUnknownResourceSelector) +} diff --git a/internal/controller/apiextensions/composite/extra_resources_test.go b/internal/controller/apiextensions/composite/extra_resources_test.go new file mode 100644 index 000000000..9b7a1bf11 --- /dev/null +++ b/internal/controller/apiextensions/composite/extra_resources_test.go @@ -0,0 +1,468 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+*/ + +package composite + +import ( + "context" + "fmt" + "math/rand" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/protobuf/testing/protocmp" + kerrors "k8s.io/apimachinery/pkg/api/errors" + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/test" + + "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" +) + +var _ FunctionRunner = &FetchingFunctionRunner{} + +func TestExistingExtraResourcesFetcherFetch(t *testing.T) { + errBoom := errors.New("boom") + + type args struct { + rs *v1beta1.ResourceSelector + c client.Reader + } + type want struct { + res *v1beta1.Resources + err error + } + cases := map[string]struct { + reason string + args args + want want + }{ + "SuccessMatchName": { + reason: "We should return a valid Resources when a resource is found by name", + args: args{ + rs: &v1beta1.ResourceSelector{ + ApiVersion: "test.crossplane.io/v1", + Kind: "Foo", + Match: &v1beta1.ResourceSelector_MatchName{ + MatchName: "cool-resource", + }, + }, + c: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.SetName("cool-resource") + return nil + }), + }, + }, + want: want{ + res: &v1beta1.Resources{ + Items: []*v1beta1.Resource{ + { + Resource: MustStruct(map[string]any{ + "apiVersion": "test.crossplane.io/v1", + "kind": "Foo", + "metadata": map[string]any{ + "name": "cool-resource", + }, + }), + }, + }, + }, + }, + }, + "SuccessMatchLabels": { + reason: "We should return a valid Resources when a resource is found by labels", + args: args{ + rs: &v1beta1.ResourceSelector{ + ApiVersion: "test.crossplane.io/v1", + Kind: "Foo", + Match: &v1beta1.ResourceSelector_MatchLabels{ + MatchLabels: &v1beta1.MatchLabels{ + Labels: map[string]string{ + "cool": "resource", + }, + }, + }, + }, + c: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + obj.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "test.crossplane.io/v1", + "kind": "Foo", + "metadata": map[string]interface{}{ + "name": "cool-resource", + "labels": map[string]interface{}{ + "cool": "resource", + }, + }, + }, + }, + { + Object: map[string]interface{}{ + "apiVersion": "test.crossplane.io/v1", + "kind": "Foo", + "metadata": map[string]interface{}{ + "name": "cooler-resource", + "labels": map[string]interface{}{ + "cool": "resource", + }, + }, + }, + }, + } + return nil + }), + }, + }, + want: want{ + res: &v1beta1.Resources{ + Items: []*v1beta1.Resource{ + { + Resource: MustStruct(map[string]any{ + "apiVersion": "test.crossplane.io/v1", + "kind": "Foo", + "metadata": map[string]any{ + "name": "cool-resource", + "labels": map[string]any{ + "cool": "resource", + }, + }, + }), + }, + { + Resource: MustStruct(map[string]any{ + "apiVersion": "test.crossplane.io/v1", + "kind": "Foo", + "metadata": map[string]any{ + "name": "cooler-resource", + "labels": map[string]any{ + "cool": "resource", + }, + }, + }), + }, + }, + }, + }, + }, + "NotFoundMatchName": { + reason: "We should return no error when a resource is not found by name", + args: args{ + rs: &v1beta1.ResourceSelector{ + ApiVersion: "test.crossplane.io/v1", + Kind: "Foo", + Match: &v1beta1.ResourceSelector_MatchName{ + MatchName: "cool-resource", + }, 
+ }, + c: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{Resource: "Foo"}, "cool-resource")), + }, + }, + want: want{ + res: nil, + err: nil, + }, + }, + // NOTE(phisco): No NotFound error is returned when listing resources by labels, so there is no NotFoundMatchLabels test case. + "ErrorMatchName": { + reason: "We should return any other error encountered when getting a resource by name", + args: args{ + rs: &v1beta1.ResourceSelector{ + ApiVersion: "test.crossplane.io/v1", + Kind: "Foo", + Match: &v1beta1.ResourceSelector_MatchName{ + MatchName: "cool-resource", + }, + }, + c: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), + }, + }, + want: want{ + res: nil, + err: errBoom, + }, + }, + "ErrorMatchLabels": { + reason: "We should return any other error encountered when listing resources by labels", + args: args{ + rs: &v1beta1.ResourceSelector{ + ApiVersion: "test.crossplane.io/v1", + Kind: "Foo", + Match: &v1beta1.ResourceSelector_MatchLabels{ + MatchLabels: &v1beta1.MatchLabels{ + Labels: map[string]string{ + "cool": "resource", + }, + }, + }, + }, + c: &test.MockClient{ + MockList: test.NewMockListFn(errBoom), + }, + }, + want: want{ + res: nil, + err: errBoom, + }, + }, + } + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + g := NewExistingExtraResourcesFetcher(tc.args.c) + res, err := g.Fetch(context.Background(), tc.args.rs) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\nGet(...): -want, +got:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.res, res, protocmp.Transform()); diff != "" { + t.Errorf("\n%s\nGet(...): -want, +got:\n%s", tc.reason, diff) + } + }) + } +} + +func TestFetchingFunctionRunner(t *testing.T) { + coolResource := MustStruct(map[string]any{ + "apiVersion": "test.crossplane.io/v1", + "Kind": "CoolResource", + "metadata": map[string]any{ + "name": "pretty-cool", + }, + }) + + // Used in the Success test + called := false + + type params struct { + wrapped FunctionRunner + resources ExtraResourcesFetcher + } + type args struct { + ctx context.Context + name string + req *v1beta1.RunFunctionRequest + } + type want struct { + rsp *v1beta1.RunFunctionResponse + err error + } + + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "RunFunctionError": { + reason: "We should return an error if the wrapped FunctionRunner does", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + return nil, errors.New("boom") + }), + }, + args: args{}, + want: want{ + err: cmpopts.AnyError, + }, + }, + "FatalResult": { + reason: "We should return early if the function returns a fatal result", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + rsp := &v1beta1.RunFunctionResponse{ + Results: []*v1beta1.Result{ + { + Severity: v1beta1.Severity_SEVERITY_FATAL, + }, + }, + } + return rsp, nil + }), + }, + args: args{}, + want: want{ + rsp: &v1beta1.RunFunctionResponse{ + Results: []*v1beta1.Result{ + { + Severity: v1beta1.Severity_SEVERITY_FATAL, + }, + }, + }, + err: nil, + }, + }, + "NoRequirements": { + reason: "We should return the response unchanged if there are no requirements", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + 
rsp := &v1beta1.RunFunctionResponse{ + Results: []*v1beta1.Result{ + { + Severity: v1beta1.Severity_SEVERITY_NORMAL, + }, + }, + } + return rsp, nil + }), + }, + args: args{}, + want: want{ + rsp: &v1beta1.RunFunctionResponse{ + Results: []*v1beta1.Result{ + { + Severity: v1beta1.Severity_SEVERITY_NORMAL, + }, + }, + }, + err: nil, + }, + }, + "FetchResourcesError": { + reason: "We should return any error encountered when fetching extra resources", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + rsp := &v1beta1.RunFunctionResponse{ + Requirements: &v1beta1.Requirements{ + ExtraResources: map[string]*v1beta1.ResourceSelector{ + "gimme": { + ApiVersion: "test.crossplane.io/v1", + Kind: "CoolResource", + }, + }, + }, + } + return rsp, nil + }), + resources: ExtraResourcesFetcherFn(func(_ context.Context, _ *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { + return nil, errors.New("boom") + }), + }, + args: args{ + req: &v1beta1.RunFunctionRequest{}, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "RequirementsDidntStabilizeError": { + reason: "We should return an error if the function's requirements never stabilize", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + rsp := &v1beta1.RunFunctionResponse{ + Requirements: &v1beta1.Requirements{ + ExtraResources: map[string]*v1beta1.ResourceSelector{ + "gimme": { + ApiVersion: "test.crossplane.io/v1", + + // What are the chances we get the same number 5 times in a row? + Kind: fmt.Sprintf("CoolResource%d", rand.Int31()), + }, + }, + }, + } + return rsp, nil + }), + resources: ExtraResourcesFetcherFn(func(_ context.Context, _ *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { + return &v1beta1.Resources{}, nil + }), + }, + args: args{ + req: &v1beta1.RunFunctionRequest{}, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "Success": { + reason: "We should return the fetched resources", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + // We only expect to be sent extra resources the second time + // we're called, in response to our requirements. 
+ if called { + want := &v1beta1.RunFunctionRequest{ + ExtraResources: map[string]*v1beta1.Resources{ + "gimme": { + Items: []*v1beta1.Resource{{Resource: coolResource}}, + }, + }, + } + + if diff := cmp.Diff(want, req, protocmp.Transform()); diff != "" { + t.Errorf("RunFunction(): -want, +got:\n%s", diff) + return nil, errors.New("unexpected RunFunctionRequest") + } + } + + called = true + + rsp := &v1beta1.RunFunctionResponse{ + Requirements: &v1beta1.Requirements{ + ExtraResources: map[string]*v1beta1.ResourceSelector{ + "gimme": { + ApiVersion: "test.crossplane.io/v1", + Kind: "CoolResource", + }, + }, + }, + } + return rsp, nil + }), + resources: ExtraResourcesFetcherFn(func(_ context.Context, _ *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { + r := &v1beta1.Resources{ + Items: []*v1beta1.Resource{{Resource: coolResource}}, + } + return r, nil + }), + }, + args: args{ + req: &v1beta1.RunFunctionRequest{}, + }, + want: want{ + rsp: &v1beta1.RunFunctionResponse{ + Requirements: &v1beta1.Requirements{ + ExtraResources: map[string]*v1beta1.ResourceSelector{ + "gimme": { + ApiVersion: "test.crossplane.io/v1", + Kind: "CoolResource", + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + r := NewFetchingFunctionRunner(tc.params.wrapped, tc.params.resources) + rsp, err := r.RunFunction(tc.args.ctx, tc.args.name, tc.args.req) + + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\nr.RunFunction(...): -want, +got:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.rsp, rsp, protocmp.Transform()); diff != "" { + t.Errorf("\n%s\nr.RunFunction(...): -want, +got:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 58ce14d66..4444acd5f 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -600,11 +600,12 @@ func (r *Reconciler) CompositeReconcilerOptions(ctx context.Context, d *v1.Compo composite.WithCompositeConnectionDetailsFetcher(fetcher), } + var runner composite.FunctionRunner = r.options.FunctionRunner if r.options.Features.Enabled(features.EnableBetaCompositionFunctionsExtraResources) { - fcopts = append(fcopts, composite.WithExtraResourcesFetcher(composite.NewExistingExtraResourcesFetcher(r.engine.GetClient()))) + runner = composite.NewFetchingFunctionRunner(runner, composite.NewExistingExtraResourcesFetcher(r.engine.GetClient())) } - fc := composite.NewFunctionComposer(r.engine.GetClient(), r.options.FunctionRunner, fcopts...) + fc := composite.NewFunctionComposer(r.engine.GetClient(), runner, fcopts...) // Note that if external secret stores are enabled this will supersede // the WithComposer option specified in that block. 
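The `FetchingFunctionRunner` introduced in this commit implements a small negotiation loop: run the function, read the extra-resource requirements from its response, fetch the selected resources, and run the function again with them included in the request, stopping once the requirements stop changing or `MaxRequirementsIterations` is exceeded. Below is a minimal sketch of that loop from the caller's side, reusing the `FunctionRunnerFn` and `ExtraResourcesFetcherFn` adapters exercised in the tests above; the requirement name, API version, and kind are illustrative, not taken from the patch.

```go
package composite

import (
	"context"

	"github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1"
)

// exampleFetchingRunFunction is an illustrative sketch, not part of this patch.
func exampleFetchingRunFunction(ctx context.Context) (*v1beta1.RunFunctionResponse, error) {
	// A toy function that always asks for one extra resource. Its requirements
	// are identical on every call, so they stabilize on the second iteration.
	fn := FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) {
		return &v1beta1.RunFunctionResponse{
			Requirements: &v1beta1.Requirements{
				ExtraResources: map[string]*v1beta1.ResourceSelector{
					// "gimme" is an arbitrary requirement name.
					"gimme": {
						ApiVersion: "example.org/v1",
						Kind:       "Example",
						Match:      &v1beta1.ResourceSelector_MatchName{MatchName: "example"},
					},
				},
			},
		}, nil
	})

	// A fetcher that satisfies every selector with an empty result set. In the
	// controller the fetcher is NewExistingExtraResourcesFetcher(client), as
	// wired up in the reconciler change above.
	fetch := ExtraResourcesFetcherFn(func(_ context.Context, _ *v1beta1.ResourceSelector) (*v1beta1.Resources, error) {
		return &v1beta1.Resources{}, nil
	})

	// The wrapping runner re-runs fn, fetching "gimme" between iterations,
	// until the requirements stabilize or MaxRequirementsIterations is hit.
	return NewFetchingFunctionRunner(fn, fetch).RunFunction(ctx, "example-function", &v1beta1.RunFunctionRequest{})
}
```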
From 81074a1e1d21eeedacdc3593c0b2ecf3d67c4dc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 14 Aug 2024 16:56:59 +0300 Subject: [PATCH 338/370] Add secrets.crossplane.io grant rules to RBAC manager roles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lucas Käldström --- .../templates/rbac-manager-managed-clusterroles.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/cluster/charts/crossplane/templates/rbac-manager-managed-clusterroles.yaml b/cluster/charts/crossplane/templates/rbac-manager-managed-clusterroles.yaml index 2ddd200c7..c8ad21be5 100644 --- a/cluster/charts/crossplane/templates/rbac-manager-managed-clusterroles.yaml +++ b/cluster/charts/crossplane/templates/rbac-manager-managed-clusterroles.yaml @@ -103,6 +103,10 @@ rules: - pkg.crossplane.io resources: ["*"] verbs: ["*"] +- apiGroups: + - secrets.crossplane.io + resources: ["*"] + verbs: ["*"] # Crossplane administrators have access to view CRDs in order to debug XRDs. - apiGroups: [apiextensions.k8s.io] resources: [customresourcedefinitions] @@ -139,6 +143,10 @@ rules: - pkg.crossplane.io resources: ["*"] verbs: ["*"] +- apiGroups: + - secrets.crossplane.io + resources: ["*"] + verbs: ["*"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -166,6 +174,10 @@ rules: - pkg.crossplane.io resources: ["*"] verbs: [get, list, watch] +- apiGroups: + - secrets.crossplane.io + resources: ["*"] + verbs: [get, list, watch] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole From 2f2241de1944aae0f6ad7d0a4d771867dee82bcb Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 14 Aug 2024 17:29:28 -0700 Subject: [PATCH 339/370] Promote functions to v1, deprecate native P&T Signed-off-by: Nic Cope --- .golangci.yml | 7 + .../proto/{v1beta1 => v1}/run_function.pb.go | 820 +++++++++--------- .../proto/{v1beta1 => v1}/run_function.proto | 4 +- .../{v1beta1 => v1}/run_function_grpc.pb.go | 10 +- .../v1/composition_revision_types.go | 32 +- apis/apiextensions/v1/composition_types.go | 30 +- apis/generate.go | 7 + apis/pkg/meta/v1/conversion.go | 3 + .../meta/{v1beta1 => v1}/function_types.go | 2 +- apis/pkg/meta/v1/interfaces.go | 11 + apis/pkg/meta/v1/register.go | 9 + apis/pkg/meta/v1/zz_generated.deepcopy.go | 47 + apis/pkg/meta/v1beta1/conversion.go | 73 +- apis/pkg/meta/v1beta1/function_interfaces.go | 50 -- .../meta/v1beta1/zz_generated.conversion.go | 161 ++++ ...test.go => zz_generated.function_types.go} | 22 +- apis/pkg/{v1beta1 => v1}/function_types.go | 22 +- apis/pkg/v1/interfaces.go | 309 +++++++ apis/pkg/v1/interfaces_test.go | 3 + apis/pkg/v1/register.go | 18 + apis/pkg/v1/zz_generated.deepcopy.go | 185 ++++ apis/pkg/v1beta1/function_interfaces.go | 334 ------- apis/pkg/v1beta1/function_interfaces_test.go | 25 - apis/pkg/v1beta1/zz_generated.deepcopy.go | 246 ++++++ .../v1beta1/zz_generated.function_types.go | 120 +++ .../zz_generated.package_runtime_types.go | 73 ++ .../pkg/v1beta1/zz_generated.package_types.go | 94 ++ .../v1beta1/zz_generated.revision_types.go | 111 +++ ...ns.crossplane.io_compositionrevisions.yaml | 68 +- ...extensions.crossplane.io_compositions.yaml | 31 +- .../pkg.crossplane.io_functionrevisions.yaml | 318 ++++++- cluster/crds/pkg.crossplane.io_functions.yaml | 218 ++++- cmd/crank/beta/render/load.go | 10 +- cmd/crank/beta/render/load_test.go | 12 +- cmd/crank/beta/render/render.go | 28 +- cmd/crank/beta/render/render_test.go | 126 +-- cmd/crank/beta/render/runtime.go | 4 +- 
cmd/crank/beta/render/runtime_development.go | 4 +- cmd/crank/beta/render/runtime_docker.go | 8 +- cmd/crank/beta/render/runtime_docker_test.go | 35 +- cmd/crank/beta/render/testdata/functions.yaml | 2 +- .../trace/internal/resource/xpkg/client.go | 28 +- .../internal/resource/xpkg/client_test.go | 2 +- .../beta/trace/internal/resource/xpkg/xpkg.go | 8 +- .../trace/internal/resource/xpkg/xpkg_test.go | 34 +- cmd/crank/beta/validate/manager_test.go | 2 +- cmd/crank/xpkg/install.go | 4 +- cmd/crank/xpkg/update.go | 2 +- cmd/crossplane/core/core.go | 67 +- .../composite/composition_functions.go | 56 +- .../composite/composition_functions_test.go | 108 +-- .../composite/extra_resources.go | 24 +- .../composite/extra_resources_test.go | 126 +-- .../apiextensions/definition/reconciler.go | 67 +- internal/controller/pkg/manager/reconciler.go | 13 +- internal/controller/pkg/pkg.go | 15 +- .../controller/pkg/resolver/reconciler.go | 2 +- .../controller/pkg/revision/reconciler.go | 9 +- internal/controller/pkg/revision/runtime.go | 2 +- .../pkg/revision/runtime_function.go | 11 +- .../pkg/revision/runtime_function_test.go | 125 ++- .../controller/pkg/revision/runtime_test.go | 4 +- internal/controller/pkg/revision/watch.go | 2 +- internal/features/features.go | 10 - internal/initializer/installer.go | 5 +- internal/initializer/installer_test.go | 41 +- internal/xfn/function_runner.go | 13 +- internal/xfn/function_runner_metrics.go | 8 +- internal/xfn/function_runner_test.go | 67 +- internal/xpkg/lint.go | 4 +- 70 files changed, 3071 insertions(+), 1480 deletions(-) rename apis/apiextensions/fn/proto/{v1beta1 => v1}/run_function.pb.go (52%) rename apis/apiextensions/fn/proto/{v1beta1 => v1}/run_function.proto (99%) rename apis/apiextensions/fn/proto/{v1beta1 => v1}/run_function_grpc.pb.go (94%) rename apis/pkg/meta/{v1beta1 => v1}/function_types.go (98%) delete mode 100644 apis/pkg/meta/v1beta1/function_interfaces.go create mode 100644 apis/pkg/meta/v1beta1/zz_generated.conversion.go rename apis/pkg/meta/v1beta1/{function_interfaces_test.go => zz_generated.function_types.go} (50%) rename apis/pkg/{v1beta1 => v1}/function_types.go (84%) delete mode 100644 apis/pkg/v1beta1/function_interfaces.go delete mode 100644 apis/pkg/v1beta1/function_interfaces_test.go create mode 100644 apis/pkg/v1beta1/zz_generated.function_types.go create mode 100644 apis/pkg/v1beta1/zz_generated.package_runtime_types.go create mode 100644 apis/pkg/v1beta1/zz_generated.package_types.go create mode 100644 apis/pkg/v1beta1/zz_generated.revision_types.go diff --git a/.golangci.yml b/.golangci.yml index 90f918be7..dc1aa045f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -288,6 +288,13 @@ issues: linters: - musttag + # Various fields related to native patch and transform Composition are + # deprecated, but we can't drop support from Crossplane 1.x. We ignore the + # warnings globally instead of suppressing them with comments everywhere. + - text: "SA1019: .+ is deprecated: Use Composition Functions instead." + linters: + - staticcheck + # Independently from option `exclude` we use default exclude patterns, # it can be disabled by this option. To list all # excluded by default patterns execute `golangci-lint run --help`. 
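The `.golangci.yml` exclusion above silences staticcheck's SA1019 findings for uses of the native patch-and-transform fields that this commit deprecates. SA1019 fires on Go's standard `Deprecated:` doc-comment convention, so the matched message comes from markers of roughly the following shape; the type and field below are illustrative only, not quoted from `composition_types.go`.

```go
package v1

// ExampleSpec is an illustrative stand-in for the Composition spec types that
// gain deprecation markers in this commit.
type ExampleSpec struct {
	// Resources is a list of resource templates used by native patch and
	// transform composition.
	//
	// Deprecated: Use Composition Functions instead.
	Resources []string `json:"resources,omitempty"`
}
```

The generated `run_function.pb.go` diff that follows is largely mechanical: the RPC types move from the `v1beta1` proto package to `v1`, so consumers change their import path and package name while the message types themselves (for example `RunFunctionRequest`) keep their names.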
diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1/run_function.pb.go similarity index 52% rename from apis/apiextensions/fn/proto/v1beta1/run_function.pb.go rename to apis/apiextensions/fn/proto/v1/run_function.pb.go index 20a9660b6..5b7f99bc0 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1/run_function.pb.go @@ -17,9 +17,9 @@ // versions: // protoc-gen-go v1.33.0 // protoc (unknown) -// source: apiextensions/fn/proto/v1beta1/run_function.proto +// source: apiextensions/fn/proto/v1/run_function.proto -package v1beta1 +package v1 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -73,11 +73,11 @@ func (x Ready) String() string { } func (Ready) Descriptor() protoreflect.EnumDescriptor { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[0].Descriptor() + return file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[0].Descriptor() } func (Ready) Type() protoreflect.EnumType { - return &file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[0] + return &file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[0] } func (x Ready) Number() protoreflect.EnumNumber { @@ -86,7 +86,7 @@ func (x Ready) Number() protoreflect.EnumNumber { // Deprecated: Use Ready.Descriptor instead. func (Ready) EnumDescriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{0} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{0} } // Severity of Function results. @@ -134,11 +134,11 @@ func (x Severity) String() string { } func (Severity) Descriptor() protoreflect.EnumDescriptor { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[1].Descriptor() + return file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[1].Descriptor() } func (Severity) Type() protoreflect.EnumType { - return &file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[1] + return &file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[1] } func (x Severity) Number() protoreflect.EnumNumber { @@ -147,7 +147,7 @@ func (x Severity) Number() protoreflect.EnumNumber { // Deprecated: Use Severity.Descriptor instead. func (Severity) EnumDescriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{1} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{1} } // Target of Function results and conditions. @@ -189,11 +189,11 @@ func (x Target) String() string { } func (Target) Descriptor() protoreflect.EnumDescriptor { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[2].Descriptor() + return file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[2].Descriptor() } func (Target) Type() protoreflect.EnumType { - return &file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[2] + return &file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[2] } func (x Target) Number() protoreflect.EnumNumber { @@ -202,7 +202,7 @@ func (x Target) Number() protoreflect.EnumNumber { // Deprecated: Use Target.Descriptor instead. 
func (Target) EnumDescriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{2} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{2} } type Status int32 @@ -241,11 +241,11 @@ func (x Status) String() string { } func (Status) Descriptor() protoreflect.EnumDescriptor { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[3].Descriptor() + return file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[3].Descriptor() } func (Status) Type() protoreflect.EnumType { - return &file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[3] + return &file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[3] } func (x Status) Number() protoreflect.EnumNumber { @@ -254,7 +254,7 @@ func (x Status) Number() protoreflect.EnumNumber { // Deprecated: Use Status.Descriptor instead. func (Status) EnumDescriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{3} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{3} } // A RunFunctionRequest requests that the Composition Function be run. @@ -303,7 +303,7 @@ type RunFunctionRequest struct { func (x *RunFunctionRequest) Reset() { *x = RunFunctionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -316,7 +316,7 @@ func (x *RunFunctionRequest) String() string { func (*RunFunctionRequest) ProtoMessage() {} func (x *RunFunctionRequest) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -329,7 +329,7 @@ func (x *RunFunctionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RunFunctionRequest.ProtoReflect.Descriptor instead. func (*RunFunctionRequest) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{0} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{0} } func (x *RunFunctionRequest) GetMeta() *RequestMeta { @@ -398,7 +398,7 @@ type Credentials struct { func (x *Credentials) Reset() { *x = Credentials{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -411,7 +411,7 @@ func (x *Credentials) String() string { func (*Credentials) ProtoMessage() {} func (x *Credentials) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -424,7 +424,7 @@ func (x *Credentials) ProtoReflect() protoreflect.Message { // Deprecated: Use Credentials.ProtoReflect.Descriptor instead. 
func (*Credentials) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{1} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{1} } func (m *Credentials) GetSource() isCredentials_Source { @@ -464,7 +464,7 @@ type CredentialData struct { func (x *CredentialData) Reset() { *x = CredentialData{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -477,7 +477,7 @@ func (x *CredentialData) String() string { func (*CredentialData) ProtoMessage() {} func (x *CredentialData) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -490,7 +490,7 @@ func (x *CredentialData) ProtoReflect() protoreflect.Message { // Deprecated: Use CredentialData.ProtoReflect.Descriptor instead. func (*CredentialData) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{2} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{2} } func (x *CredentialData) GetData() map[string][]byte { @@ -512,7 +512,7 @@ type Resources struct { func (x *Resources) Reset() { *x = Resources{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -525,7 +525,7 @@ func (x *Resources) String() string { func (*Resources) ProtoMessage() {} func (x *Resources) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -538,7 +538,7 @@ func (x *Resources) ProtoReflect() protoreflect.Message { // Deprecated: Use Resources.ProtoReflect.Descriptor instead. 
func (*Resources) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{3} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{3} } func (x *Resources) GetItems() []*Resource { @@ -583,7 +583,7 @@ type RunFunctionResponse struct { func (x *RunFunctionResponse) Reset() { *x = RunFunctionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -596,7 +596,7 @@ func (x *RunFunctionResponse) String() string { func (*RunFunctionResponse) ProtoMessage() {} func (x *RunFunctionResponse) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -609,7 +609,7 @@ func (x *RunFunctionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RunFunctionResponse.ProtoReflect.Descriptor instead. func (*RunFunctionResponse) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{4} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{4} } func (x *RunFunctionResponse) GetMeta() *ResponseMeta { @@ -668,7 +668,7 @@ type RequestMeta struct { func (x *RequestMeta) Reset() { *x = RequestMeta{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -681,7 +681,7 @@ func (x *RequestMeta) String() string { func (*RequestMeta) ProtoMessage() {} func (x *RequestMeta) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -694,7 +694,7 @@ func (x *RequestMeta) ProtoReflect() protoreflect.Message { // Deprecated: Use RequestMeta.ProtoReflect.Descriptor instead. 
func (*RequestMeta) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{5} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{5} } func (x *RequestMeta) GetTag() string { @@ -718,7 +718,7 @@ type Requirements struct { func (x *Requirements) Reset() { *x = Requirements{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -731,7 +731,7 @@ func (x *Requirements) String() string { func (*Requirements) ProtoMessage() {} func (x *Requirements) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -744,7 +744,7 @@ func (x *Requirements) ProtoReflect() protoreflect.Message { // Deprecated: Use Requirements.ProtoReflect.Descriptor instead. func (*Requirements) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{6} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{6} } func (x *Requirements) GetExtraResources() map[string]*ResourceSelector { @@ -776,7 +776,7 @@ type ResourceSelector struct { func (x *ResourceSelector) Reset() { *x = ResourceSelector{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -789,7 +789,7 @@ func (x *ResourceSelector) String() string { func (*ResourceSelector) ProtoMessage() {} func (x *ResourceSelector) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -802,7 +802,7 @@ func (x *ResourceSelector) ProtoReflect() protoreflect.Message { // Deprecated: Use ResourceSelector.ProtoReflect.Descriptor instead. 
func (*ResourceSelector) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{7} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{7} } func (x *ResourceSelector) GetApiVersion() string { @@ -870,7 +870,7 @@ type MatchLabels struct { func (x *MatchLabels) Reset() { *x = MatchLabels{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[8] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -883,7 +883,7 @@ func (x *MatchLabels) String() string { func (*MatchLabels) ProtoMessage() {} func (x *MatchLabels) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[8] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -896,7 +896,7 @@ func (x *MatchLabels) ProtoReflect() protoreflect.Message { // Deprecated: Use MatchLabels.ProtoReflect.Descriptor instead. func (*MatchLabels) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{8} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{8} } func (x *MatchLabels) GetLabels() map[string]string { @@ -924,7 +924,7 @@ type ResponseMeta struct { func (x *ResponseMeta) Reset() { *x = ResponseMeta{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -937,7 +937,7 @@ func (x *ResponseMeta) String() string { func (*ResponseMeta) ProtoMessage() {} func (x *ResponseMeta) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -950,7 +950,7 @@ func (x *ResponseMeta) ProtoReflect() protoreflect.Message { // Deprecated: Use ResponseMeta.ProtoReflect.Descriptor instead. 
func (*ResponseMeta) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{9} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{9} } func (x *ResponseMeta) GetTag() string { @@ -982,7 +982,7 @@ type State struct { func (x *State) Reset() { *x = State{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[10] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -995,7 +995,7 @@ func (x *State) String() string { func (*State) ProtoMessage() {} func (x *State) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[10] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1008,7 +1008,7 @@ func (x *State) ProtoReflect() protoreflect.Message { // Deprecated: Use State.ProtoReflect.Descriptor instead. func (*State) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{10} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{10} } func (x *State) GetComposite() *Resource { @@ -1068,13 +1068,13 @@ type Resource struct { // // - A Function should not set this field in a RunFunctionResponse to indicate // that the desired composite resource is ready. This will be ignored. - Ready Ready `protobuf:"varint,3,opt,name=ready,proto3,enum=apiextensions.fn.proto.v1beta1.Ready" json:"ready,omitempty"` + Ready Ready `protobuf:"varint,3,opt,name=ready,proto3,enum=apiextensions.fn.proto.v1.Ready" json:"ready,omitempty"` } func (x *Resource) Reset() { *x = Resource{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[11] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1087,7 +1087,7 @@ func (x *Resource) String() string { func (*Resource) ProtoMessage() {} func (x *Resource) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[11] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1100,7 +1100,7 @@ func (x *Resource) ProtoReflect() protoreflect.Message { // Deprecated: Use Resource.ProtoReflect.Descriptor instead. func (*Resource) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{11} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{11} } func (x *Resource) GetResource() *structpb.Struct { @@ -1131,20 +1131,20 @@ type Result struct { unknownFields protoimpl.UnknownFields // Severity of this result. - Severity Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=apiextensions.fn.proto.v1beta1.Severity" json:"severity,omitempty"` + Severity Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=apiextensions.fn.proto.v1.Severity" json:"severity,omitempty"` // Human-readable details about the result. 
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // Optional PascalCase, machine-readable reason for this result. If omitted, // the value will be ComposeResources. Reason *string `protobuf:"bytes,3,opt,name=reason,proto3,oneof" json:"reason,omitempty"` // The resources this result targets. - Target *Target `protobuf:"varint,4,opt,name=target,proto3,enum=apiextensions.fn.proto.v1beta1.Target,oneof" json:"target,omitempty"` + Target *Target `protobuf:"varint,4,opt,name=target,proto3,enum=apiextensions.fn.proto.v1.Target,oneof" json:"target,omitempty"` } func (x *Result) Reset() { *x = Result{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[12] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1157,7 +1157,7 @@ func (x *Result) String() string { func (*Result) ProtoMessage() {} func (x *Result) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[12] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1170,7 +1170,7 @@ func (x *Result) ProtoReflect() protoreflect.Message { // Deprecated: Use Result.ProtoReflect.Descriptor instead. func (*Result) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{12} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{12} } func (x *Result) GetSeverity() Severity { @@ -1213,7 +1213,7 @@ type Condition struct { // Type of condition in PascalCase. Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Status of the condition. - Status Status `protobuf:"varint,2,opt,name=status,proto3,enum=apiextensions.fn.proto.v1beta1.Status" json:"status,omitempty"` + Status Status `protobuf:"varint,2,opt,name=status,proto3,enum=apiextensions.fn.proto.v1.Status" json:"status,omitempty"` // Reason contains a programmatic identifier indicating the reason for the // condition's last transition. Producers of specific condition types may // define expected values and meanings for this field, and whether the values @@ -1224,13 +1224,13 @@ type Condition struct { // transition. This may be an empty string. Message *string `protobuf:"bytes,4,opt,name=message,proto3,oneof" json:"message,omitempty"` // The resources this condition targets. 
- Target *Target `protobuf:"varint,5,opt,name=target,proto3,enum=apiextensions.fn.proto.v1beta1.Target,oneof" json:"target,omitempty"` + Target *Target `protobuf:"varint,5,opt,name=target,proto3,enum=apiextensions.fn.proto.v1.Target,oneof" json:"target,omitempty"` } func (x *Condition) Reset() { *x = Condition{} if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[13] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1243,7 +1243,7 @@ func (x *Condition) String() string { func (*Condition) ProtoMessage() {} func (x *Condition) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[13] + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1256,7 +1256,7 @@ func (x *Condition) ProtoReflect() protoreflect.Message { // Deprecated: Use Condition.ProtoReflect.Descriptor instead. func (*Condition) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{13} + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{13} } func (x *Condition) GetType() string { @@ -1294,347 +1294,337 @@ func (x *Condition) GetTarget() Target { return Target_TARGET_UNSPECIFIED } -var File_apiextensions_fn_proto_v1beta1_run_function_proto protoreflect.FileDescriptor - -var file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = []byte{ - 0x0a, 0x31, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x66, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, - 0x2f, 0x72, 0x75, 0x6e, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, - 0x74, 0x61, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x8e, 0x06, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, - 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x08, 0x6f, 0x62, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, - 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x07, - 0x64, 
0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, +var File_apiextensions_fn_proto_v1_run_function_proto protoreflect.FileDescriptor + +var file_apiextensions_fn_proto_v1_run_function_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x66, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x75, 0x6e, 0x5f, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x32, 0x0a, - 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x88, 0x01, - 0x01, 0x12, 0x36, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x01, 0x52, 0x07, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, 0x12, 0x6f, 0x0a, 0x0f, 0x65, 0x78, 0x74, - 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, - 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x72, - 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x65, 0x0a, 0x0b, 0x63, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x43, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, - 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x1a, 0x6c, 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x6b, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 
0x6c, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, - 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, - 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x22, 0x72, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x12, 0x59, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x42, 0x08, 0x0a, 0x06, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x97, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4c, 0x0a, 0x04, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x4b, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, - 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xeb, 0x05, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, + 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xbb, 0x03, - 
0x0a, 0x13, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, - 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3c, 0x0a, 0x08, 0x6f, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, + 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, + 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x65, 0x73, 0x69, + 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x07, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x88, - 0x01, 0x01, 0x12, 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x64, 0x65, 0x73, + 0x69, 0x72, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x05, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x48, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, + 0x12, 0x6a, 0x0a, 0x0f, 
0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, + 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x60, 0x0a, 0x0b, + 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3e, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, + 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x67, + 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x66, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3c, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, + 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, + 0x08, 0x0a, 0x06, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x6d, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x12, 0x54, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x42, 0x08, 0x0a, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 
0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, + 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x46, 0x0a, 0x09, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, + 0x73, 0x22, 0xa2, 0x03, 0x0a, 0x13, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x6d, 0x65, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, + 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, + 0x36, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, 0x12, 0x4b, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, + 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x49, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x61, 
0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, - 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x1f, 0x0a, 0x0b, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, - 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x22, 0xee, 0x01, 0x0a, - 0x0c, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x69, 0x0a, - 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, + 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x1f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x22, 0xe4, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x64, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x72, + 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, + 0x65, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x6e, + 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x72, 0x61, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x73, 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, - 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x30, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, - 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, - 
0x6f, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc3, 0x01, - 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, - 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x12, 0x4f, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, - 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0x5a, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, - 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, - 0x67, 0x12, 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x03, 0x74, 0x74, 0x6c, - 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x74, 0x74, 0x6c, 0x22, 0x8b, 0x02, 0x0a, 0x05, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xbe, + 0x01, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 
0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, + 0x94, 0x01, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x4a, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x32, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5a, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x74, + 0x74, 0x6c, 0x22, 0xfc, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x09, + 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x12, + 0x4d, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x61, + 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xa8, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x33, + 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x69, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3a, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x36, + 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52, + 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x01, 0x0a, + 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x3f, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, + 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, + 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, + 0x3e, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x21, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x48, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, + 0x09, 0x0a, 0x07, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x09, 0x43, 0x6f, 
0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x06, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x01, 0x52, 0x06, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x2a, 0x3f, 0x0a, 0x05, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x41, + 0x44, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x01, + 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, + 0x02, 0x2a, 0x63, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, + 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x45, 0x56, 0x45, 0x52, + 0x49, 0x54, 0x59, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, + 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x4f, + 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x03, 0x2a, 0x56, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x1e, + 0x0a, 0x1a, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x53, 0x49, + 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x41, 0x49, 0x4d, 0x10, 0x02, 0x2a, 0x7f, + 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x20, 0x0a, 0x1c, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, + 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, + 
0x45, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, + 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x03, 0x32, + 0x87, 0x01, 0x0a, 0x15, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x6e, + 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6e, 0x0a, 0x0b, 0x52, 0x75, 0x6e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x12, 0x52, 0x0a, - 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x34, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, - 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x1a, 0x66, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, - 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x08, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x6e, 0x0a, 0x12, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x05, 0x72, - 0x65, 0x61, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, - 0x79, 0x52, 0x05, 0x72, 0x65, 
0x61, 0x64, 0x79, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe0, - 0x01, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x44, 0x0a, 0x08, 0x73, 0x65, 0x76, - 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x61, 0x70, - 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x76, - 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, - 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x72, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x43, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x01, - 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, - 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x22, 0xf2, 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, - 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x43, 0x0a, 0x06, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x48, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, - 0x0a, 0x0a, 0x08, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2a, 0x3f, 0x0a, 0x05, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, - 0x15, 0x0a, 0x11, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 
0x0a, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, - 0x54, 0x52, 0x55, 0x45, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, - 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x02, 0x2a, 0x63, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, - 0x0e, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, - 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x57, 0x41, - 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x56, 0x45, 0x52, - 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x03, 0x2a, 0x56, 0x0a, 0x06, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, - 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x53, 0x49, - 0x54, 0x45, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x43, - 0x4f, 0x4d, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x41, - 0x49, 0x4d, 0x10, 0x02, 0x2a, 0x7f, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x20, - 0x0a, 0x1c, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x19, - 0x0a, 0x15, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, - 0x4c, 0x53, 0x45, 0x10, 0x03, 0x32, 0x91, 0x01, 0x0a, 0x15, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x78, 0x0a, 0x0b, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, - 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, - 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, - 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 
0x6e, 0x73, - 0x2f, 0x66, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2f, 0x66, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( - file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescOnce sync.Once - file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData = file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc + file_apiextensions_fn_proto_v1_run_function_proto_rawDescOnce sync.Once + file_apiextensions_fn_proto_v1_run_function_proto_rawDescData = file_apiextensions_fn_proto_v1_run_function_proto_rawDesc ) -func file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP() []byte { - file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescOnce.Do(func() { - file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData = protoimpl.X.CompressGZIP(file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData) +func file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP() []byte { + file_apiextensions_fn_proto_v1_run_function_proto_rawDescOnce.Do(func() { + file_apiextensions_fn_proto_v1_run_function_proto_rawDescData = protoimpl.X.CompressGZIP(file_apiextensions_fn_proto_v1_run_function_proto_rawDescData) }) - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData -} - -var file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 21) -var file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes = []interface{}{ - (Ready)(0), // 0: apiextensions.fn.proto.v1beta1.Ready - (Severity)(0), // 1: apiextensions.fn.proto.v1beta1.Severity - (Target)(0), // 2: apiextensions.fn.proto.v1beta1.Target - (Status)(0), // 3: apiextensions.fn.proto.v1beta1.Status - (*RunFunctionRequest)(nil), // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest - (*Credentials)(nil), // 5: apiextensions.fn.proto.v1beta1.Credentials - (*CredentialData)(nil), // 6: apiextensions.fn.proto.v1beta1.CredentialData - (*Resources)(nil), // 7: apiextensions.fn.proto.v1beta1.Resources - (*RunFunctionResponse)(nil), // 8: apiextensions.fn.proto.v1beta1.RunFunctionResponse - (*RequestMeta)(nil), // 9: apiextensions.fn.proto.v1beta1.RequestMeta - (*Requirements)(nil), // 10: apiextensions.fn.proto.v1beta1.Requirements - (*ResourceSelector)(nil), // 11: apiextensions.fn.proto.v1beta1.ResourceSelector - (*MatchLabels)(nil), // 12: apiextensions.fn.proto.v1beta1.MatchLabels - (*ResponseMeta)(nil), // 13: apiextensions.fn.proto.v1beta1.ResponseMeta - (*State)(nil), // 14: apiextensions.fn.proto.v1beta1.State - (*Resource)(nil), // 15: apiextensions.fn.proto.v1beta1.Resource - (*Result)(nil), // 16: apiextensions.fn.proto.v1beta1.Result - (*Condition)(nil), // 17: apiextensions.fn.proto.v1beta1.Condition - nil, // 18: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry - nil, // 19: apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry - nil, // 20: apiextensions.fn.proto.v1beta1.CredentialData.DataEntry - nil, // 21: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry - nil, // 22: apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry - nil, // 23: apiextensions.fn.proto.v1beta1.State.ResourcesEntry - nil, // 24: apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + return 
file_apiextensions_fn_proto_v1_run_function_proto_rawDescData +} + +var file_apiextensions_fn_proto_v1_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_apiextensions_fn_proto_v1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_apiextensions_fn_proto_v1_run_function_proto_goTypes = []interface{}{ + (Ready)(0), // 0: apiextensions.fn.proto.v1.Ready + (Severity)(0), // 1: apiextensions.fn.proto.v1.Severity + (Target)(0), // 2: apiextensions.fn.proto.v1.Target + (Status)(0), // 3: apiextensions.fn.proto.v1.Status + (*RunFunctionRequest)(nil), // 4: apiextensions.fn.proto.v1.RunFunctionRequest + (*Credentials)(nil), // 5: apiextensions.fn.proto.v1.Credentials + (*CredentialData)(nil), // 6: apiextensions.fn.proto.v1.CredentialData + (*Resources)(nil), // 7: apiextensions.fn.proto.v1.Resources + (*RunFunctionResponse)(nil), // 8: apiextensions.fn.proto.v1.RunFunctionResponse + (*RequestMeta)(nil), // 9: apiextensions.fn.proto.v1.RequestMeta + (*Requirements)(nil), // 10: apiextensions.fn.proto.v1.Requirements + (*ResourceSelector)(nil), // 11: apiextensions.fn.proto.v1.ResourceSelector + (*MatchLabels)(nil), // 12: apiextensions.fn.proto.v1.MatchLabels + (*ResponseMeta)(nil), // 13: apiextensions.fn.proto.v1.ResponseMeta + (*State)(nil), // 14: apiextensions.fn.proto.v1.State + (*Resource)(nil), // 15: apiextensions.fn.proto.v1.Resource + (*Result)(nil), // 16: apiextensions.fn.proto.v1.Result + (*Condition)(nil), // 17: apiextensions.fn.proto.v1.Condition + nil, // 18: apiextensions.fn.proto.v1.RunFunctionRequest.ExtraResourcesEntry + nil, // 19: apiextensions.fn.proto.v1.RunFunctionRequest.CredentialsEntry + nil, // 20: apiextensions.fn.proto.v1.CredentialData.DataEntry + nil, // 21: apiextensions.fn.proto.v1.Requirements.ExtraResourcesEntry + nil, // 22: apiextensions.fn.proto.v1.MatchLabels.LabelsEntry + nil, // 23: apiextensions.fn.proto.v1.State.ResourcesEntry + nil, // 24: apiextensions.fn.proto.v1.Resource.ConnectionDetailsEntry (*structpb.Struct)(nil), // 25: google.protobuf.Struct (*durationpb.Duration)(nil), // 26: google.protobuf.Duration } -var file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs = []int32{ - 9, // 0: apiextensions.fn.proto.v1beta1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1beta1.RequestMeta - 14, // 1: apiextensions.fn.proto.v1beta1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1beta1.State - 14, // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1beta1.State - 25, // 3: apiextensions.fn.proto.v1beta1.RunFunctionRequest.input:type_name -> google.protobuf.Struct - 25, // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest.context:type_name -> google.protobuf.Struct - 18, // 5: apiextensions.fn.proto.v1beta1.RunFunctionRequest.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry - 19, // 6: apiextensions.fn.proto.v1beta1.RunFunctionRequest.credentials:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry - 6, // 7: apiextensions.fn.proto.v1beta1.Credentials.credential_data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData - 20, // 8: apiextensions.fn.proto.v1beta1.CredentialData.data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData.DataEntry - 15, // 9: apiextensions.fn.proto.v1beta1.Resources.items:type_name -> apiextensions.fn.proto.v1beta1.Resource - 13, // 10: apiextensions.fn.proto.v1beta1.RunFunctionResponse.meta:type_name -> 
apiextensions.fn.proto.v1beta1.ResponseMeta - 14, // 11: apiextensions.fn.proto.v1beta1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1beta1.State - 16, // 12: apiextensions.fn.proto.v1beta1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1beta1.Result - 25, // 13: apiextensions.fn.proto.v1beta1.RunFunctionResponse.context:type_name -> google.protobuf.Struct - 10, // 14: apiextensions.fn.proto.v1beta1.RunFunctionResponse.requirements:type_name -> apiextensions.fn.proto.v1beta1.Requirements - 17, // 15: apiextensions.fn.proto.v1beta1.RunFunctionResponse.conditions:type_name -> apiextensions.fn.proto.v1beta1.Condition - 21, // 16: apiextensions.fn.proto.v1beta1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry - 12, // 17: apiextensions.fn.proto.v1beta1.ResourceSelector.match_labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels - 22, // 18: apiextensions.fn.proto.v1beta1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry - 26, // 19: apiextensions.fn.proto.v1beta1.ResponseMeta.ttl:type_name -> google.protobuf.Duration - 15, // 20: apiextensions.fn.proto.v1beta1.State.composite:type_name -> apiextensions.fn.proto.v1beta1.Resource - 23, // 21: apiextensions.fn.proto.v1beta1.State.resources:type_name -> apiextensions.fn.proto.v1beta1.State.ResourcesEntry - 25, // 22: apiextensions.fn.proto.v1beta1.Resource.resource:type_name -> google.protobuf.Struct - 24, // 23: apiextensions.fn.proto.v1beta1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry - 0, // 24: apiextensions.fn.proto.v1beta1.Resource.ready:type_name -> apiextensions.fn.proto.v1beta1.Ready - 1, // 25: apiextensions.fn.proto.v1beta1.Result.severity:type_name -> apiextensions.fn.proto.v1beta1.Severity - 2, // 26: apiextensions.fn.proto.v1beta1.Result.target:type_name -> apiextensions.fn.proto.v1beta1.Target - 3, // 27: apiextensions.fn.proto.v1beta1.Condition.status:type_name -> apiextensions.fn.proto.v1beta1.Status - 2, // 28: apiextensions.fn.proto.v1beta1.Condition.target:type_name -> apiextensions.fn.proto.v1beta1.Target - 7, // 29: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resources - 5, // 30: apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Credentials - 11, // 31: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.ResourceSelector - 15, // 32: apiextensions.fn.proto.v1beta1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resource - 4, // 33: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1beta1.RunFunctionRequest - 8, // 34: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1beta1.RunFunctionResponse +var file_apiextensions_fn_proto_v1_run_function_proto_depIdxs = []int32{ + 9, // 0: apiextensions.fn.proto.v1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1.RequestMeta + 14, // 1: apiextensions.fn.proto.v1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1.State + 14, // 2: apiextensions.fn.proto.v1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1.State + 25, // 3: apiextensions.fn.proto.v1.RunFunctionRequest.input:type_name -> google.protobuf.Struct + 25, // 
4: apiextensions.fn.proto.v1.RunFunctionRequest.context:type_name -> google.protobuf.Struct + 18, // 5: apiextensions.fn.proto.v1.RunFunctionRequest.extra_resources:type_name -> apiextensions.fn.proto.v1.RunFunctionRequest.ExtraResourcesEntry + 19, // 6: apiextensions.fn.proto.v1.RunFunctionRequest.credentials:type_name -> apiextensions.fn.proto.v1.RunFunctionRequest.CredentialsEntry + 6, // 7: apiextensions.fn.proto.v1.Credentials.credential_data:type_name -> apiextensions.fn.proto.v1.CredentialData + 20, // 8: apiextensions.fn.proto.v1.CredentialData.data:type_name -> apiextensions.fn.proto.v1.CredentialData.DataEntry + 15, // 9: apiextensions.fn.proto.v1.Resources.items:type_name -> apiextensions.fn.proto.v1.Resource + 13, // 10: apiextensions.fn.proto.v1.RunFunctionResponse.meta:type_name -> apiextensions.fn.proto.v1.ResponseMeta + 14, // 11: apiextensions.fn.proto.v1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1.State + 16, // 12: apiextensions.fn.proto.v1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1.Result + 25, // 13: apiextensions.fn.proto.v1.RunFunctionResponse.context:type_name -> google.protobuf.Struct + 10, // 14: apiextensions.fn.proto.v1.RunFunctionResponse.requirements:type_name -> apiextensions.fn.proto.v1.Requirements + 17, // 15: apiextensions.fn.proto.v1.RunFunctionResponse.conditions:type_name -> apiextensions.fn.proto.v1.Condition + 21, // 16: apiextensions.fn.proto.v1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1.Requirements.ExtraResourcesEntry + 12, // 17: apiextensions.fn.proto.v1.ResourceSelector.match_labels:type_name -> apiextensions.fn.proto.v1.MatchLabels + 22, // 18: apiextensions.fn.proto.v1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1.MatchLabels.LabelsEntry + 26, // 19: apiextensions.fn.proto.v1.ResponseMeta.ttl:type_name -> google.protobuf.Duration + 15, // 20: apiextensions.fn.proto.v1.State.composite:type_name -> apiextensions.fn.proto.v1.Resource + 23, // 21: apiextensions.fn.proto.v1.State.resources:type_name -> apiextensions.fn.proto.v1.State.ResourcesEntry + 25, // 22: apiextensions.fn.proto.v1.Resource.resource:type_name -> google.protobuf.Struct + 24, // 23: apiextensions.fn.proto.v1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1.Resource.ConnectionDetailsEntry + 0, // 24: apiextensions.fn.proto.v1.Resource.ready:type_name -> apiextensions.fn.proto.v1.Ready + 1, // 25: apiextensions.fn.proto.v1.Result.severity:type_name -> apiextensions.fn.proto.v1.Severity + 2, // 26: apiextensions.fn.proto.v1.Result.target:type_name -> apiextensions.fn.proto.v1.Target + 3, // 27: apiextensions.fn.proto.v1.Condition.status:type_name -> apiextensions.fn.proto.v1.Status + 2, // 28: apiextensions.fn.proto.v1.Condition.target:type_name -> apiextensions.fn.proto.v1.Target + 7, // 29: apiextensions.fn.proto.v1.RunFunctionRequest.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1.Resources + 5, // 30: apiextensions.fn.proto.v1.RunFunctionRequest.CredentialsEntry.value:type_name -> apiextensions.fn.proto.v1.Credentials + 11, // 31: apiextensions.fn.proto.v1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1.ResourceSelector + 15, // 32: apiextensions.fn.proto.v1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1.Resource + 4, // 33: apiextensions.fn.proto.v1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1.RunFunctionRequest + 8, // 34: 
apiextensions.fn.proto.v1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1.RunFunctionResponse 34, // [34:35] is the sub-list for method output_type 33, // [33:34] is the sub-list for method input_type 33, // [33:33] is the sub-list for extension type_name @@ -1642,13 +1632,13 @@ var file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs = []int32{ 0, // [0:33] is the sub-list for field type_name } -func init() { file_apiextensions_fn_proto_v1beta1_run_function_proto_init() } -func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { - if File_apiextensions_fn_proto_v1beta1_run_function_proto != nil { +func init() { file_apiextensions_fn_proto_v1_run_function_proto_init() } +func file_apiextensions_fn_proto_v1_run_function_proto_init() { + if File_apiextensions_fn_proto_v1_run_function_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RunFunctionRequest); i { case 0: return &v.state @@ -1660,7 +1650,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Credentials); i { case 0: return &v.state @@ -1672,7 +1662,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CredentialData); i { case 0: return &v.state @@ -1684,7 +1674,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Resources); i { case 0: return &v.state @@ -1696,7 +1686,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RunFunctionResponse); i { case 0: return &v.state @@ -1708,7 +1698,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RequestMeta); i { case 0: return &v.state @@ -1720,7 +1710,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { 
switch v := v.(*Requirements); i { case 0: return &v.state @@ -1732,7 +1722,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ResourceSelector); i { case 0: return &v.state @@ -1744,7 +1734,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MatchLabels); i { case 0: return &v.state @@ -1756,7 +1746,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ResponseMeta); i { case 0: return &v.state @@ -1768,7 +1758,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*State); i { case 0: return &v.state @@ -1780,7 +1770,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Resource); i { case 0: return &v.state @@ -1792,7 +1782,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Result); i { case 0: return &v.state @@ -1804,7 +1794,7 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Condition); i { case 0: return &v.state @@ -1817,35 +1807,35 @@ func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { } } } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1].OneofWrappers = []interface{}{ (*Credentials_CredentialData)(nil), } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4].OneofWrappers = []interface{}{} - 
file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7].OneofWrappers = []interface{}{ + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7].OneofWrappers = []interface{}{ (*ResourceSelector_MatchName)(nil), (*ResourceSelector_MatchLabels)(nil), } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[12].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[13].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc, + RawDescriptor: file_apiextensions_fn_proto_v1_run_function_proto_rawDesc, NumEnums: 4, NumMessages: 21, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes, - DependencyIndexes: file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs, - EnumInfos: file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes, - MessageInfos: file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes, + GoTypes: file_apiextensions_fn_proto_v1_run_function_proto_goTypes, + DependencyIndexes: file_apiextensions_fn_proto_v1_run_function_proto_depIdxs, + EnumInfos: file_apiextensions_fn_proto_v1_run_function_proto_enumTypes, + MessageInfos: file_apiextensions_fn_proto_v1_run_function_proto_msgTypes, }.Build() - File_apiextensions_fn_proto_v1beta1_run_function_proto = out.File - file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = nil - file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes = nil - file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs = nil + File_apiextensions_fn_proto_v1_run_function_proto = out.File + file_apiextensions_fn_proto_v1_run_function_proto_rawDesc = nil + file_apiextensions_fn_proto_v1_run_function_proto_goTypes = nil + file_apiextensions_fn_proto_v1_run_function_proto_depIdxs = nil } diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.proto b/apis/apiextensions/fn/proto/v1/run_function.proto similarity index 99% rename from apis/apiextensions/fn/proto/v1beta1/run_function.proto rename to apis/apiextensions/fn/proto/v1/run_function.proto index 18060fb3c..b66e970fd 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.proto +++ b/apis/apiextensions/fn/proto/v1/run_function.proto @@ -19,9 +19,9 @@ syntax = "proto3"; import "google/protobuf/struct.proto"; import "google/protobuf/duration.proto"; -package apiextensions.fn.proto.v1beta1; +package apiextensions.fn.proto.v1; -option go_package = "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1"; +option go_package = "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1"; // A FunctionRunnerService is a Composition Function. 
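This patch graduates the RunFunction gRPC API from v1beta1 to v1 without changing its shape, only the package and generated symbol names. As a rough illustration of what a caller of the renamed package looks like, here is a minimal sketch of dialling a Function runner and invoking RunFunction. The `NewFunctionRunnerServiceClient` constructor is the standard protoc-gen-go-grpc output that accompanies the stubs renamed here rather than something added by this patch, and the endpoint address is illustrative only.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1"
)

func main() {
	// Dial a (hypothetical) locally running Function; real deployments use TLS.
	conn, err := grpc.Dial("localhost:9443", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := fnv1.NewFunctionRunnerServiceClient(conn)

	// Send an (empty) request. A real caller populates observed and desired state.
	rsp, err := client.RunFunction(context.Background(), &fnv1.RunFunctionRequest{})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(rsp.GetMeta().GetTag())
}
```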
service FunctionRunnerService { diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function_grpc.pb.go b/apis/apiextensions/fn/proto/v1/run_function_grpc.pb.go similarity index 94% rename from apis/apiextensions/fn/proto/v1beta1/run_function_grpc.pb.go rename to apis/apiextensions/fn/proto/v1/run_function_grpc.pb.go index 5ea2c19d6..81d40b60b 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function_grpc.pb.go +++ b/apis/apiextensions/fn/proto/v1/run_function_grpc.pb.go @@ -17,9 +17,9 @@ // versions: // - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) -// source: apiextensions/fn/proto/v1beta1/run_function.proto +// source: apiextensions/fn/proto/v1/run_function.proto -package v1beta1 +package v1 import ( context "context" @@ -34,7 +34,7 @@ import ( const _ = grpc.SupportPackageIsVersion7 const ( - FunctionRunnerService_RunFunction_FullMethodName = "/apiextensions.fn.proto.v1beta1.FunctionRunnerService/RunFunction" + FunctionRunnerService_RunFunction_FullMethodName = "/apiextensions.fn.proto.v1.FunctionRunnerService/RunFunction" ) // FunctionRunnerServiceClient is the client API for FunctionRunnerService service. @@ -113,7 +113,7 @@ func _FunctionRunnerService_RunFunction_Handler(srv interface{}, ctx context.Con // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var FunctionRunnerService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "apiextensions.fn.proto.v1beta1.FunctionRunnerService", + ServiceName: "apiextensions.fn.proto.v1.FunctionRunnerService", HandlerType: (*FunctionRunnerServiceServer)(nil), Methods: []grpc.MethodDesc{ { @@ -122,5 +122,5 @@ var FunctionRunnerService_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "apiextensions/fn/proto/v1beta1/run_function.proto", + Metadata: "apiextensions/fn/proto/v1/run_function.proto", } diff --git a/apis/apiextensions/v1/composition_revision_types.go b/apis/apiextensions/v1/composition_revision_types.go index 19c4d5287..fc8c07886 100644 --- a/apis/apiextensions/v1/composition_revision_types.go +++ b/apis/apiextensions/v1/composition_revision_types.go @@ -43,16 +43,19 @@ type CompositionRevisionSpec struct { // Mode controls what type or "mode" of Composition will be used. // - // "Resources" (the default) indicates that a Composition uses what is - // commonly referred to as "Patch & Transform" or P&T composition. This mode - // of Composition uses an array of resources, each a template for a composed - // resource. + // "Pipeline" indicates that a Composition specifies a pipeline of + // Composition Functions, each of which is responsible for producing + // composed resources that Crossplane should create or update. + // + // "Resources" indicates that a Composition uses what is commonly referred + // to as "Patch & Transform" or P&T composition. This mode of Composition + // uses an array of resources, each a template for a composed resource. + // + // All Compositions should use Pipeline mode. Resources mode is deprecated. + // Resources mode won't be removed in Crossplane 1.x, and will remain the + // default to avoid breaking legacy Compositions. However, it's no longer + // accepting new features, and only accepting security related bug fixes. // - // "Pipeline" indicates that a Composition specifies a pipeline - // of Composition Functions, each of which is responsible for producing - // composed resources that Crossplane should create or update. THE PIPELINE - // MODE IS A BETA FEATURE. 
It is not honored if the relevant Crossplane - // feature flag is disabled. // +optional // +kubebuilder:validation:Enum=Resources;Pipeline // +kubebuilder:default=Resources @@ -64,6 +67,9 @@ type CompositionRevisionSpec struct { // // PatchSets are only used by the "Resources" mode of Composition. They // are ignored by other modes. + // + // Deprecated: Use Composition Functions instead. + // // +optional PatchSets []PatchSet `json:"patchSets,omitempty"` @@ -80,6 +86,9 @@ type CompositionRevisionSpec struct { // // Resources are only used by the "Resources" mode of Composition. They are // ignored by other modes. + // + // Deprecated: Use Composition Functions instead. + // // +optional Resources []ComposedTemplate `json:"resources,omitempty"` @@ -89,10 +98,9 @@ type CompositionRevisionSpec struct { // // The Pipeline is only used by the "Pipeline" mode of Composition. It is // ignored by other modes. - // - // THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - // feature flag is disabled. // +optional + // +listType=map + // +listMapKey=step Pipeline []PipelineStep `json:"pipeline,omitempty"` // WriteConnectionSecretsToNamespace specifies the namespace in which the diff --git a/apis/apiextensions/v1/composition_types.go b/apis/apiextensions/v1/composition_types.go index 5cc6d8960..4ffe3c44c 100644 --- a/apis/apiextensions/v1/composition_types.go +++ b/apis/apiextensions/v1/composition_types.go @@ -29,16 +29,19 @@ type CompositionSpec struct { // Mode controls what type or "mode" of Composition will be used. // - // "Resources" (the default) indicates that a Composition uses what is - // commonly referred to as "Patch & Transform" or P&T composition. This mode - // of Composition uses an array of resources, each a template for a composed - // resource. + // "Pipeline" indicates that a Composition specifies a pipeline of + // Composition Functions, each of which is responsible for producing + // composed resources that Crossplane should create or update. + // + // "Resources" indicates that a Composition uses what is commonly referred + // to as "Patch & Transform" or P&T composition. This mode of Composition + // uses an array of resources, each a template for a composed resource. + // + // All Compositions should use Pipeline mode. Resources mode is deprecated. + // Resources mode won't be removed in Crossplane 1.x, and will remain the + // default to avoid breaking legacy Compositions. However, it's no longer + // accepting new features, and only accepting security related bug fixes. // - // "Pipeline" indicates that a Composition specifies a pipeline - // of Composition Functions, each of which is responsible for producing - // composed resources that Crossplane should create or update. THE PIPELINE - // MODE IS A BETA FEATURE. It is not honored if the relevant Crossplane - // feature flag is disabled. // +optional // +kubebuilder:validation:Enum=Resources;Pipeline // +kubebuilder:default=Resources @@ -50,6 +53,9 @@ type CompositionSpec struct { // // PatchSets are only used by the "Resources" mode of Composition. They // are ignored by other modes. + // + // Deprecated: Use Composition Functions instead. + // // +optional PatchSets []PatchSet `json:"patchSets,omitempty"` @@ -66,6 +72,9 @@ type CompositionSpec struct { // // Resources are only used by the "Resources" mode of Composition. They are // ignored by other modes. + // + // Deprecated: Use Composition Functions instead. 
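The rewritten Mode documentation above makes Pipeline the recommended mode and deprecates Resources mode. A minimal sketch of what a Pipeline-mode spec looks like when built from these Go types follows; the `CompositionModePipeline` constant and the `FunctionReference` type are assumed from the wider apiextensions/v1 package, since only the `Mode` and `Pipeline` fields appear in this patch, and the function name is illustrative.

```go
package main

import (
	"fmt"

	apiextensionsv1 "github.com/crossplane/crossplane/apis/apiextensions/v1"
)

func main() {
	// Pipeline mode is now the recommended mode for all Compositions.
	mode := apiextensionsv1.CompositionModePipeline

	spec := apiextensionsv1.CompositionSpec{
		Mode: &mode,
		// Each step names the Function package that should run it.
		Pipeline: []apiextensionsv1.PipelineStep{{
			Step:        "patch-and-transform",
			FunctionRef: apiextensionsv1.FunctionReference{Name: "function-patch-and-transform"},
		}},
	}

	fmt.Println(*spec.Mode, len(spec.Pipeline))
}
```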
+ // // +optional Resources []ComposedTemplate `json:"resources,omitempty"` @@ -75,9 +84,6 @@ type CompositionSpec struct { // // The Pipeline is only used by the "Pipeline" mode of Composition. It is // ignored by other modes. - // - // THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - // feature flag is disabled. // +optional // +listType=map // +listMapKey=step diff --git a/apis/generate.go b/apis/generate.go index c272ffe2b..c06d53324 100644 --- a/apis/generate.go +++ b/apis/generate.go @@ -32,8 +32,14 @@ limitations under the License. //go:generate ../hack/duplicate_api_type.sh apiextensions/v1/composition_transforms.go apiextensions/v1beta1 //go:generate ../hack/duplicate_api_type.sh apiextensions/v1/composition_environment.go apiextensions/v1beta1 +//go:generate ../hack/duplicate_api_type.sh pkg/v1/package_types.go pkg/v1beta1 +//go:generate ../hack/duplicate_api_type.sh pkg/v1/package_runtime_types.go pkg/v1beta1 +//go:generate ../hack/duplicate_api_type.sh pkg/v1/revision_types.go pkg/v1beta1 +//go:generate ../hack/duplicate_api_type.sh pkg/v1/function_types.go pkg/v1beta1 + //go:generate ../hack/duplicate_api_type.sh pkg/meta/v1/configuration_types.go pkg/meta/v1alpha1 //go:generate ../hack/duplicate_api_type.sh pkg/meta/v1/provider_types.go pkg/meta/v1alpha1 +//go:generate ../hack/duplicate_api_type.sh pkg/meta/v1/function_types.go pkg/meta/v1beta1 //go:generate ../hack/duplicate_api_type.sh pkg/meta/v1/meta.go pkg/meta/v1alpha1 //go:generate ../hack/duplicate_api_type.sh pkg/meta/v1/meta.go pkg/meta/v1beta1 @@ -57,6 +63,7 @@ limitations under the License. // Generate conversion code //go:generate go run -tags generate github.com/jmattheis/goverter/cmd/goverter gen -build-tags="" ./apiextensions/v1 //go:generate go run -tags generate github.com/jmattheis/goverter/cmd/goverter gen -build-tags="" ./pkg/meta/v1alpha1 +//go:generate go run -tags generate github.com/jmattheis/goverter/cmd/goverter gen -build-tags="" ./pkg/meta/v1beta1 // Generate gRPC types and stubs. // diff --git a/apis/pkg/meta/v1/conversion.go b/apis/pkg/meta/v1/conversion.go index 98ff6c683..7880d49a4 100644 --- a/apis/pkg/meta/v1/conversion.go +++ b/apis/pkg/meta/v1/conversion.go @@ -21,3 +21,6 @@ func (p *Provider) Hub() {} // Hub marks this type as the conversion hub. func (c *Configuration) Hub() {} + +// Hub marks this type as the conversion hub. +func (f *Function) Hub() {} diff --git a/apis/pkg/meta/v1beta1/function_types.go b/apis/pkg/meta/v1/function_types.go similarity index 98% rename from apis/pkg/meta/v1beta1/function_types.go rename to apis/pkg/meta/v1/function_types.go index cd603b534..5a22b4732 100644 --- a/apis/pkg/meta/v1beta1/function_types.go +++ b/apis/pkg/meta/v1/function_types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/apis/pkg/meta/v1/interfaces.go b/apis/pkg/meta/v1/interfaces.go index fc578fbbc..7310b91be 100644 --- a/apis/pkg/meta/v1/interfaces.go +++ b/apis/pkg/meta/v1/interfaces.go @@ -19,6 +19,7 @@ package v1 var ( _ Pkg = &Configuration{} _ Pkg = &Provider{} + _ Pkg = &Function{} ) // Pkg is a description of a Crossplane package. 
@@ -49,3 +50,13 @@ func (p *Provider) GetCrossplaneConstraints() *CrossplaneConstraints { func (p *Provider) GetDependencies() []Dependency { return p.Spec.MetaSpec.DependsOn } + +// GetCrossplaneConstraints gets the Function package's Crossplane version constraints. +func (f *Function) GetCrossplaneConstraints() *CrossplaneConstraints { + return f.Spec.MetaSpec.Crossplane +} + +// GetDependencies gets the Function package's dependencies. +func (f *Function) GetDependencies() []Dependency { + return f.Spec.DependsOn +} diff --git a/apis/pkg/meta/v1/register.go b/apis/pkg/meta/v1/register.go index 5444051c9..0136ebcaa 100644 --- a/apis/pkg/meta/v1/register.go +++ b/apis/pkg/meta/v1/register.go @@ -56,7 +56,16 @@ var ( ConfigurationGroupVersionKind = SchemeGroupVersion.WithKind(ConfigurationKind) ) +// Function type metadata. +var ( + FunctionKind = reflect.TypeOf(Function{}).Name() + FunctionGroupKind = schema.GroupKind{Group: Group, Kind: FunctionKind}.String() + FunctionKindAPIVersion = FunctionKind + "." + SchemeGroupVersion.String() + FunctionGroupVersionKind = SchemeGroupVersion.WithKind(FunctionKind) +) + func init() { SchemeBuilder.Register(&Configuration{}) SchemeBuilder.Register(&Provider{}) + SchemeBuilder.Register(&Function{}) } diff --git a/apis/pkg/meta/v1/zz_generated.deepcopy.go b/apis/pkg/meta/v1/zz_generated.deepcopy.go index 96d2c4505..bc1d84595 100644 --- a/apis/pkg/meta/v1/zz_generated.deepcopy.go +++ b/apis/pkg/meta/v1/zz_generated.deepcopy.go @@ -139,6 +139,53 @@ func (in *Dependency) DeepCopy() *Dependency { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Function) DeepCopyInto(out *Function) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. +func (in *Function) DeepCopy() *Function { + if in == nil { + return nil + } + out := new(Function) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Function) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { + *out = *in + in.MetaSpec.DeepCopyInto(&out.MetaSpec) + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. +func (in *FunctionSpec) DeepCopy() *FunctionSpec { + if in == nil { + return nil + } + out := new(FunctionSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MetaSpec) DeepCopyInto(out *MetaSpec) { *out = *in diff --git a/apis/pkg/meta/v1beta1/conversion.go b/apis/pkg/meta/v1beta1/conversion.go index 872d93c32..9dcea5362 100644 --- a/apis/pkg/meta/v1beta1/conversion.go +++ b/apis/pkg/meta/v1beta1/conversion.go @@ -16,5 +16,74 @@ limitations under the License. package v1beta1 -// Hub marks this type as the conversion hub. 
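With the accessors added above, the graduated meta v1 Function satisfies the same Pkg interface as Provider and Configuration, so package-manager style code can treat all three generically. A small sketch under that assumption; the dependency version constraint is illustrative only.

```go
package main

import (
	"fmt"

	pkgmetav1 "github.com/crossplane/crossplane/apis/pkg/meta/v1"
)

func main() {
	// Function now satisfies Pkg, alongside Provider and Configuration.
	var pkg pkgmetav1.Pkg = &pkgmetav1.Function{
		Spec: pkgmetav1.FunctionSpec{
			MetaSpec: pkgmetav1.MetaSpec{
				DependsOn: []pkgmetav1.Dependency{{Version: ">=v1.14.0"}},
			},
		},
	}

	// One declared dependency, no Crossplane version constraint.
	fmt.Println(len(pkg.GetDependencies()), pkg.GetCrossplaneConstraints() == nil)
}
```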
-func (f *Function) Hub() {} +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/conversion" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + + v1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" +) + +const ( + errWrongConvertToFunction = "must convert to *v1.Function" + errWrongConvertFromFunction = "must convert from *v1.Function" +) + +// A ToHubConverter converts v1beta1 types to the 'hub' v1 type. +// +// goverter:converter +// goverter:name GeneratedToHubConverter +// goverter:extend ConvertObjectMeta +// goverter:output:file ./zz_generated.conversion.go +// goverter:output:package github.com/crossplane/crossplane/apis/pkg/meta/v1beta1 +// +k8s:deepcopy-gen=false +type ToHubConverter interface { + Function(in *Function) *v1.Function +} + +// A FromHubConverter converts v1beta1 types from the 'hub' v1 type. +// +// goverter:converter +// goverter:name GeneratedFromHubConverter +// goverter:extend ConvertObjectMeta +// goverter:output:file ./zz_generated.conversion.go +// goverter:output:package github.com/crossplane/crossplane/apis/pkg/meta/v1beta1 +// +k8s:deepcopy-gen=false +type FromHubConverter interface { + Function(in *v1.Function) *Function +} + +// ConvertObjectMeta 'converts' ObjectMeta by producing a deepcopy. This +// is necessary because goverter can't convert metav1.Time. It also prevents +// goverter generating code that is functionally identical to deepcopygen's. +func ConvertObjectMeta(in metav1.ObjectMeta) metav1.ObjectMeta { + out := in.DeepCopy() + return *out +} + +// ConvertTo converts this Function to the Hub version. +func (c *Function) ConvertTo(hub conversion.Hub) error { + out, ok := hub.(*v1.Function) + if !ok { + return errors.New(errWrongConvertToFunction) + } + + conv := &GeneratedToHubConverter{} + *out = *conv.Function(c) + + return nil +} + +// ConvertFrom converts this Function from the Hub version. +func (c *Function) ConvertFrom(hub conversion.Hub) error { + in, ok := hub.(*v1.Function) + if !ok { + return errors.New(errWrongConvertFromFunction) + } + + conv := &GeneratedFromHubConverter{} + *c = *conv.Function(in) + + return nil +} diff --git a/apis/pkg/meta/v1beta1/function_interfaces.go b/apis/pkg/meta/v1beta1/function_interfaces.go deleted file mode 100644 index aec145335..000000000 --- a/apis/pkg/meta/v1beta1/function_interfaces.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" -) - -// GetCrossplaneConstraints gets the Function package's Crossplane version constraints. -func (f *Function) GetCrossplaneConstraints() *v1.CrossplaneConstraints { - if f.Spec.MetaSpec.Crossplane == nil { - return nil - } - - cc := v1.CrossplaneConstraints{Version: f.Spec.MetaSpec.Crossplane.Version} - return &cc -} - -// GetDependencies gets the Function package's dependencies. 
-func (f *Function) GetDependencies() []v1.Dependency { - if f.Spec.MetaSpec.DependsOn == nil { - return []v1.Dependency{} - } - - d := make([]v1.Dependency, len(f.Spec.MetaSpec.DependsOn)) - for i, dep := range f.Spec.MetaSpec.DependsOn { - d[i] = v1.Dependency{ - Provider: dep.Provider, - Configuration: dep.Configuration, - Function: dep.Function, - Version: dep.Version, - } - } - - return d -} diff --git a/apis/pkg/meta/v1beta1/zz_generated.conversion.go b/apis/pkg/meta/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..74712ca99 --- /dev/null +++ b/apis/pkg/meta/v1beta1/zz_generated.conversion.go @@ -0,0 +1,161 @@ +// Code generated by github.com/jmattheis/goverter, DO NOT EDIT. +//go:build !goverter + +package v1beta1 + +import ( + v1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" + v11 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type GeneratedFromHubConverter struct{} + +func (c *GeneratedFromHubConverter) Function(source *v1.Function) *Function { + var pV1beta1Function *Function + if source != nil { + var v1beta1Function Function + v1beta1Function.TypeMeta = c.v1TypeMetaToV1TypeMeta((*source).TypeMeta) + v1beta1Function.ObjectMeta = ConvertObjectMeta((*source).ObjectMeta) + v1beta1Function.Spec = c.v1FunctionSpecToV1beta1FunctionSpec((*source).Spec) + pV1beta1Function = &v1beta1Function + } + return pV1beta1Function +} +func (c *GeneratedFromHubConverter) pV1CrossplaneConstraintsToPV1beta1CrossplaneConstraints(source *v1.CrossplaneConstraints) *CrossplaneConstraints { + var pV1beta1CrossplaneConstraints *CrossplaneConstraints + if source != nil { + var v1beta1CrossplaneConstraints CrossplaneConstraints + v1beta1CrossplaneConstraints.Version = (*source).Version + pV1beta1CrossplaneConstraints = &v1beta1CrossplaneConstraints + } + return pV1beta1CrossplaneConstraints +} +func (c *GeneratedFromHubConverter) v1DependencyToV1beta1Dependency(source v1.Dependency) Dependency { + var v1beta1Dependency Dependency + var pString *string + if source.Provider != nil { + xstring := *source.Provider + pString = &xstring + } + v1beta1Dependency.Provider = pString + var pString2 *string + if source.Configuration != nil { + xstring2 := *source.Configuration + pString2 = &xstring2 + } + v1beta1Dependency.Configuration = pString2 + var pString3 *string + if source.Function != nil { + xstring3 := *source.Function + pString3 = &xstring3 + } + v1beta1Dependency.Function = pString3 + v1beta1Dependency.Version = source.Version + return v1beta1Dependency +} +func (c *GeneratedFromHubConverter) v1FunctionSpecToV1beta1FunctionSpec(source v1.FunctionSpec) FunctionSpec { + var v1beta1FunctionSpec FunctionSpec + v1beta1FunctionSpec.MetaSpec = c.v1MetaSpecToV1beta1MetaSpec(source.MetaSpec) + var pString *string + if source.Image != nil { + xstring := *source.Image + pString = &xstring + } + v1beta1FunctionSpec.Image = pString + return v1beta1FunctionSpec +} +func (c *GeneratedFromHubConverter) v1MetaSpecToV1beta1MetaSpec(source v1.MetaSpec) MetaSpec { + var v1beta1MetaSpec MetaSpec + v1beta1MetaSpec.Crossplane = c.pV1CrossplaneConstraintsToPV1beta1CrossplaneConstraints(source.Crossplane) + var v1beta1DependencyList []Dependency + if source.DependsOn != nil { + v1beta1DependencyList = make([]Dependency, len(source.DependsOn)) + for i := 0; i < len(source.DependsOn); i++ { + v1beta1DependencyList[i] = c.v1DependencyToV1beta1Dependency(source.DependsOn[i]) + } + } + v1beta1MetaSpec.DependsOn = v1beta1DependencyList + return v1beta1MetaSpec +} +func (c *GeneratedFromHubConverter) 
v1TypeMetaToV1TypeMeta(source v11.TypeMeta) v11.TypeMeta { + var v1TypeMeta v11.TypeMeta + v1TypeMeta.Kind = source.Kind + v1TypeMeta.APIVersion = source.APIVersion + return v1TypeMeta +} + +type GeneratedToHubConverter struct{} + +func (c *GeneratedToHubConverter) Function(source *Function) *v1.Function { + var pV1Function *v1.Function + if source != nil { + var v1Function v1.Function + v1Function.TypeMeta = c.v1TypeMetaToV1TypeMeta((*source).TypeMeta) + v1Function.ObjectMeta = ConvertObjectMeta((*source).ObjectMeta) + v1Function.Spec = c.v1beta1FunctionSpecToV1FunctionSpec((*source).Spec) + pV1Function = &v1Function + } + return pV1Function +} +func (c *GeneratedToHubConverter) pV1beta1CrossplaneConstraintsToPV1CrossplaneConstraints(source *CrossplaneConstraints) *v1.CrossplaneConstraints { + var pV1CrossplaneConstraints *v1.CrossplaneConstraints + if source != nil { + var v1CrossplaneConstraints v1.CrossplaneConstraints + v1CrossplaneConstraints.Version = (*source).Version + pV1CrossplaneConstraints = &v1CrossplaneConstraints + } + return pV1CrossplaneConstraints +} +func (c *GeneratedToHubConverter) v1TypeMetaToV1TypeMeta(source v11.TypeMeta) v11.TypeMeta { + var v1TypeMeta v11.TypeMeta + v1TypeMeta.Kind = source.Kind + v1TypeMeta.APIVersion = source.APIVersion + return v1TypeMeta +} +func (c *GeneratedToHubConverter) v1beta1DependencyToV1Dependency(source Dependency) v1.Dependency { + var v1Dependency v1.Dependency + var pString *string + if source.Provider != nil { + xstring := *source.Provider + pString = &xstring + } + v1Dependency.Provider = pString + var pString2 *string + if source.Configuration != nil { + xstring2 := *source.Configuration + pString2 = &xstring2 + } + v1Dependency.Configuration = pString2 + var pString3 *string + if source.Function != nil { + xstring3 := *source.Function + pString3 = &xstring3 + } + v1Dependency.Function = pString3 + v1Dependency.Version = source.Version + return v1Dependency +} +func (c *GeneratedToHubConverter) v1beta1FunctionSpecToV1FunctionSpec(source FunctionSpec) v1.FunctionSpec { + var v1FunctionSpec v1.FunctionSpec + v1FunctionSpec.MetaSpec = c.v1beta1MetaSpecToV1MetaSpec(source.MetaSpec) + var pString *string + if source.Image != nil { + xstring := *source.Image + pString = &xstring + } + v1FunctionSpec.Image = pString + return v1FunctionSpec +} +func (c *GeneratedToHubConverter) v1beta1MetaSpecToV1MetaSpec(source MetaSpec) v1.MetaSpec { + var v1MetaSpec v1.MetaSpec + v1MetaSpec.Crossplane = c.pV1beta1CrossplaneConstraintsToPV1CrossplaneConstraints(source.Crossplane) + var v1DependencyList []v1.Dependency + if source.DependsOn != nil { + v1DependencyList = make([]v1.Dependency, len(source.DependsOn)) + for i := 0; i < len(source.DependsOn); i++ { + v1DependencyList[i] = c.v1beta1DependencyToV1Dependency(source.DependsOn[i]) + } + } + v1MetaSpec.DependsOn = v1DependencyList + return v1MetaSpec +} diff --git a/apis/pkg/meta/v1beta1/function_interfaces_test.go b/apis/pkg/meta/v1beta1/zz_generated.function_types.go similarity index 50% rename from apis/pkg/meta/v1beta1/function_interfaces_test.go rename to apis/pkg/meta/v1beta1/zz_generated.function_types.go index 9ddba9c7d..0698e1bcf 100644 --- a/apis/pkg/meta/v1beta1/function_interfaces_test.go +++ b/apis/pkg/meta/v1beta1/zz_generated.function_types.go @@ -14,10 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Generated from pkg/meta/v1/function_types.go by ../hack/duplicate_api_type.sh. DO NOT EDIT. 
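Together, the hand-written ConvertTo/ConvertFrom methods and the goverter-generated converters above let a v1beta1 Function round-trip through the v1 hub. A minimal sketch, with an illustrative package image reference:

```go
package main

import (
	"fmt"
	"log"

	pkgmetav1 "github.com/crossplane/crossplane/apis/pkg/meta/v1"
	"github.com/crossplane/crossplane/apis/pkg/meta/v1beta1"
)

func main() {
	img := "xpkg.example.org/function-example:v0.1.0"
	spoke := &v1beta1.Function{Spec: v1beta1.FunctionSpec{Image: &img}}

	// Convert the v1beta1 spoke to the v1 hub...
	hub := &pkgmetav1.Function{}
	if err := spoke.ConvertTo(hub); err != nil {
		log.Fatal(err)
	}

	// ...and convert it back again.
	roundTripped := &v1beta1.Function{}
	if err := roundTripped.ConvertFrom(hub); err != nil {
		log.Fatal(err)
	}

	fmt.Println(*roundTripped.Spec.Image)
}
```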
+ package v1beta1 import ( - v1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -var _ v1.Pkg = &Function{} +// FunctionSpec specifies the configuration of a Function. +type FunctionSpec struct { + MetaSpec `json:",inline"` + + // Image is the packaged Function image. + Image *string `json:"image,omitempty"` +} + +// +kubebuilder:object:root=true + +// A Function is the description of a Crossplane Function package. +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FunctionSpec `json:"spec"` +} diff --git a/apis/pkg/v1beta1/function_types.go b/apis/pkg/v1/function_types.go similarity index 84% rename from apis/pkg/v1beta1/function_types.go rename to apis/pkg/v1/function_types.go index f6d3bc55d..aa2072e37 100644 --- a/apis/pkg/v1beta1/function_types.go +++ b/apis/pkg/v1/function_types.go @@ -14,20 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - - // TODO(negz): Ideally our v1beta1 package wouldn't import types from v1, as - // this strongly couples the types. This would make life difficult if we - // wanted to evolve this package in a different direction from the current - // v1 implementation. Unfortunately the package manager implementation - // requires any type that is reconciled as a package (or revision) to - // satisfy interfaces that involve returning v1 types. - v1 "github.com/crossplane/crossplane/apis/pkg/v1" ) // +kubebuilder:object:root=true @@ -56,15 +48,15 @@ type Function struct { // FunctionSpec specifies the configuration of a Function. type FunctionSpec struct { - v1.PackageSpec `json:",inline"` + PackageSpec `json:",inline"` - v1.PackageRuntimeSpec `json:",inline"` + PackageRuntimeSpec `json:",inline"` } // FunctionStatus represents the observed state of a Function. type FunctionStatus struct { xpv1.ConditionedStatus `json:",inline"` - v1.PackageStatus `json:",inline"` + PackageStatus `json:",inline"` } // +kubebuilder:object:root=true @@ -78,8 +70,8 @@ type FunctionList struct { // FunctionRevisionSpec specifies configuration for a FunctionRevision. type FunctionRevisionSpec struct { - v1.PackageRevisionSpec `json:",inline"` - v1.PackageRevisionRuntimeSpec `json:",inline"` + PackageRevisionSpec `json:",inline"` + PackageRevisionRuntimeSpec `json:",inline"` } // +kubebuilder:object:root=true @@ -111,7 +103,7 @@ type FunctionRevision struct { // FunctionRevisionStatus represents the observed state of a FunctionRevision. type FunctionRevisionStatus struct { - v1.PackageRevisionStatus `json:",inline"` + PackageRevisionStatus `json:",inline"` // Endpoint is the gRPC endpoint where Crossplane will send // RunFunctionRequests. diff --git a/apis/pkg/v1/interfaces.go b/apis/pkg/v1/interfaces.go index bcb00885e..3fcc4ab08 100644 --- a/apis/pkg/v1/interfaces.go +++ b/apis/pkg/v1/interfaces.go @@ -760,3 +760,312 @@ func GetSecretNameWithSuffix(name, suffix string) *string { return &s } + +// GetCondition of this Function. +func (f *Function) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return f.Status.GetCondition(ct) +} + +// SetConditions of this Function. +func (f *Function) SetConditions(c ...xpv1.Condition) { + f.Status.SetConditions(c...) +} + +// CleanConditions removes all conditions. 
+func (f *Function) CleanConditions() { + f.Status.Conditions = []xpv1.Condition{} +} + +// GetSource of this Function. +func (f *Function) GetSource() string { + return f.Spec.Package +} + +// SetSource of this Function. +func (f *Function) SetSource(s string) { + f.Spec.Package = s +} + +// GetActivationPolicy of this Function. +func (f *Function) GetActivationPolicy() *RevisionActivationPolicy { + return f.Spec.RevisionActivationPolicy +} + +// SetActivationPolicy of this Function. +func (f *Function) SetActivationPolicy(a *RevisionActivationPolicy) { + f.Spec.RevisionActivationPolicy = a +} + +// GetPackagePullSecrets of this Function. +func (f *Function) GetPackagePullSecrets() []corev1.LocalObjectReference { + return f.Spec.PackagePullSecrets +} + +// SetPackagePullSecrets of this Function. +func (f *Function) SetPackagePullSecrets(s []corev1.LocalObjectReference) { + f.Spec.PackagePullSecrets = s +} + +// GetPackagePullPolicy of this Function. +func (f *Function) GetPackagePullPolicy() *corev1.PullPolicy { + return f.Spec.PackagePullPolicy +} + +// SetPackagePullPolicy of this Function. +func (f *Function) SetPackagePullPolicy(i *corev1.PullPolicy) { + f.Spec.PackagePullPolicy = i +} + +// GetRevisionHistoryLimit of this Function. +func (f *Function) GetRevisionHistoryLimit() *int64 { + return f.Spec.RevisionHistoryLimit +} + +// SetRevisionHistoryLimit of this Function. +func (f *Function) SetRevisionHistoryLimit(l *int64) { + f.Spec.RevisionHistoryLimit = l +} + +// GetIgnoreCrossplaneConstraints of this Function. +func (f *Function) GetIgnoreCrossplaneConstraints() *bool { + return f.Spec.IgnoreCrossplaneConstraints +} + +// SetIgnoreCrossplaneConstraints of this Function. +func (f *Function) SetIgnoreCrossplaneConstraints(b *bool) { + f.Spec.IgnoreCrossplaneConstraints = b +} + +// GetControllerConfigRef of this Function. +func (f *Function) GetControllerConfigRef() *ControllerConfigReference { + return nil +} + +// SetControllerConfigRef of this Function. +func (f *Function) SetControllerConfigRef(*ControllerConfigReference) {} + +// GetRuntimeConfigRef of this Function. +func (f *Function) GetRuntimeConfigRef() *RuntimeConfigReference { + return f.Spec.RuntimeConfigReference +} + +// SetRuntimeConfigRef of this Function. +func (f *Function) SetRuntimeConfigRef(r *RuntimeConfigReference) { + f.Spec.RuntimeConfigReference = r +} + +// GetCurrentRevision of this Function. +func (f *Function) GetCurrentRevision() string { + return f.Status.CurrentRevision +} + +// SetCurrentRevision of this Function. +func (f *Function) SetCurrentRevision(s string) { + f.Status.CurrentRevision = s +} + +// GetSkipDependencyResolution of this Function. +func (f *Function) GetSkipDependencyResolution() *bool { + return f.Spec.SkipDependencyResolution +} + +// SetSkipDependencyResolution of this Function. +func (f *Function) SetSkipDependencyResolution(b *bool) { + f.Spec.SkipDependencyResolution = b +} + +// GetCurrentIdentifier of this Function. +func (f *Function) GetCurrentIdentifier() string { + return f.Status.CurrentIdentifier +} + +// SetCurrentIdentifier of this Function. +func (f *Function) SetCurrentIdentifier(s string) { + f.Status.CurrentIdentifier = s +} + +// GetCommonLabels of this Function. +func (f *Function) GetCommonLabels() map[string]string { + return f.Spec.CommonLabels +} + +// SetCommonLabels of this Function. +func (f *Function) SetCommonLabels(l map[string]string) { + f.Spec.CommonLabels = l +} + +// GetTLSServerSecretName of this Function. 
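The block above gives the graduated pkg v1 Function the full set of accessors the package manager's Package interface expects. A tiny sketch of exercising a couple of them; the package reference and object name are illustrative.

```go
package main

import (
	"fmt"

	pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1"
)

func main() {
	f := &pkgv1.Function{}

	// SetSource and GetSource read and write spec.package, as defined above.
	f.SetSource("xpkg.example.org/function-example:v0.1.0")

	// Functions derive their TLS server secret name from the object name.
	f.SetName("function-example")

	fmt.Println(f.GetSource(), *f.GetTLSServerSecretName())
}
```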
+func (f *Function) GetTLSServerSecretName() *string { + return GetSecretNameWithSuffix(f.GetName(), TLSServerSecretNameSuffix) +} + +// GetTLSClientSecretName of this Function. +func (f *Function) GetTLSClientSecretName() *string { + return nil +} + +// GetCondition of this FunctionRevision. +func (r *FunctionRevision) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return r.Status.GetCondition(ct) +} + +// SetConditions of this FunctionRevision. +func (r *FunctionRevision) SetConditions(c ...xpv1.Condition) { + r.Status.SetConditions(c...) +} + +// CleanConditions removes all conditions. +func (r *FunctionRevision) CleanConditions() { + r.Status.Conditions = []xpv1.Condition{} +} + +// GetObjects of this FunctionRevision. +func (r *FunctionRevision) GetObjects() []xpv1.TypedReference { + return r.Status.ObjectRefs +} + +// SetObjects of this FunctionRevision. +func (r *FunctionRevision) SetObjects(c []xpv1.TypedReference) { + r.Status.ObjectRefs = c +} + +// GetSource of this FunctionRevision. +func (r *FunctionRevision) GetSource() string { + return r.Spec.Package +} + +// SetSource of this FunctionRevision. +func (r *FunctionRevision) SetSource(s string) { + r.Spec.Package = s +} + +// GetPackagePullSecrets of this FunctionRevision. +func (r *FunctionRevision) GetPackagePullSecrets() []corev1.LocalObjectReference { + return r.Spec.PackagePullSecrets +} + +// SetPackagePullSecrets of this FunctionRevision. +func (r *FunctionRevision) SetPackagePullSecrets(s []corev1.LocalObjectReference) { + r.Spec.PackagePullSecrets = s +} + +// GetPackagePullPolicy of this FunctionRevision. +func (r *FunctionRevision) GetPackagePullPolicy() *corev1.PullPolicy { + return r.Spec.PackagePullPolicy +} + +// SetPackagePullPolicy of this FunctionRevision. +func (r *FunctionRevision) SetPackagePullPolicy(i *corev1.PullPolicy) { + r.Spec.PackagePullPolicy = i +} + +// GetDesiredState of this FunctionRevision. +func (r *FunctionRevision) GetDesiredState() PackageRevisionDesiredState { + return r.Spec.DesiredState +} + +// SetDesiredState of this FunctionRevision. +func (r *FunctionRevision) SetDesiredState(s PackageRevisionDesiredState) { + r.Spec.DesiredState = s +} + +// GetRevision of this FunctionRevision. +func (r *FunctionRevision) GetRevision() int64 { + return r.Spec.Revision +} + +// SetRevision of this FunctionRevision. +func (r *FunctionRevision) SetRevision(rev int64) { + r.Spec.Revision = rev +} + +// GetDependencyStatus of this v. +func (r *FunctionRevision) GetDependencyStatus() (found, installed, invalid int64) { + return r.Status.FoundDependencies, r.Status.InstalledDependencies, r.Status.InvalidDependencies +} + +// SetDependencyStatus of this FunctionRevision. +func (r *FunctionRevision) SetDependencyStatus(found, installed, invalid int64) { + r.Status.FoundDependencies = found + r.Status.InstalledDependencies = installed + r.Status.InvalidDependencies = invalid +} + +// GetIgnoreCrossplaneConstraints of this FunctionRevision. +func (r *FunctionRevision) GetIgnoreCrossplaneConstraints() *bool { + return r.Spec.IgnoreCrossplaneConstraints +} + +// SetIgnoreCrossplaneConstraints of this FunctionRevision. +func (r *FunctionRevision) SetIgnoreCrossplaneConstraints(b *bool) { + r.Spec.IgnoreCrossplaneConstraints = b +} + +// GetControllerConfigRef of this FunctionRevision. +func (r *FunctionRevision) GetControllerConfigRef() *ControllerConfigReference { + return r.Spec.ControllerConfigReference +} + +// SetControllerConfigRef of this FunctionRevision. 
+func (r *FunctionRevision) SetControllerConfigRef(ref *ControllerConfigReference) { + r.Spec.ControllerConfigReference = ref +} + +// GetRuntimeConfigRef of this FunctionRevision. +func (r *FunctionRevision) GetRuntimeConfigRef() *RuntimeConfigReference { + return r.Spec.RuntimeConfigReference +} + +// SetRuntimeConfigRef of this FunctionRevision. +func (r *FunctionRevision) SetRuntimeConfigRef(ref *RuntimeConfigReference) { + r.Spec.RuntimeConfigReference = ref +} + +// GetSkipDependencyResolution of this FunctionRevision. +func (r *FunctionRevision) GetSkipDependencyResolution() *bool { + return r.Spec.SkipDependencyResolution +} + +// SetSkipDependencyResolution of this FunctionRevision. +func (r *FunctionRevision) SetSkipDependencyResolution(b *bool) { + r.Spec.SkipDependencyResolution = b +} + +// GetTLSServerSecretName of this FunctionRevision. +func (r *FunctionRevision) GetTLSServerSecretName() *string { + return r.Spec.TLSServerSecretName +} + +// SetTLSServerSecretName of this FunctionRevision. +func (r *FunctionRevision) SetTLSServerSecretName(s *string) { + r.Spec.TLSServerSecretName = s +} + +// GetTLSClientSecretName of this FunctionRevision. +func (r *FunctionRevision) GetTLSClientSecretName() *string { + return r.Spec.TLSClientSecretName +} + +// SetTLSClientSecretName of this FunctionRevision. +func (r *FunctionRevision) SetTLSClientSecretName(s *string) { + r.Spec.TLSClientSecretName = s +} + +// GetCommonLabels of this FunctionRevision. +func (r *FunctionRevision) GetCommonLabels() map[string]string { + return r.Spec.CommonLabels +} + +// SetCommonLabels of this FunctionRevision. +func (r *FunctionRevision) SetCommonLabels(l map[string]string) { + r.Spec.CommonLabels = l +} + +// GetRevisions of this ConfigurationRevisionList. +func (p *FunctionRevisionList) GetRevisions() []PackageRevision { + prs := make([]PackageRevision, len(p.Items)) + for i, r := range p.Items { + prs[i] = &r + } + return prs +} diff --git a/apis/pkg/v1/interfaces_test.go b/apis/pkg/v1/interfaces_test.go index cdb00f4c7..f078fbc9d 100644 --- a/apis/pkg/v1/interfaces_test.go +++ b/apis/pkg/v1/interfaces_test.go @@ -19,14 +19,17 @@ package v1 var ( _ Package = &Provider{} _ Package = &Configuration{} + _ Package = &Function{} ) var ( _ PackageRevision = &ProviderRevision{} _ PackageRevision = &ConfigurationRevision{} + _ PackageRevision = &FunctionRevision{} ) var ( _ PackageRevisionList = &ProviderRevisionList{} _ PackageRevisionList = &ConfigurationRevisionList{} + _ PackageRevisionList = &FunctionRevisionList{} ) diff --git a/apis/pkg/v1/register.go b/apis/pkg/v1/register.go index 34b27cbfb..2094c8949 100644 --- a/apis/pkg/v1/register.go +++ b/apis/pkg/v1/register.go @@ -72,9 +72,27 @@ var ( ProviderRevisionGroupVersionKind = SchemeGroupVersion.WithKind(ProviderRevisionKind) ) +// Function type metadata. +var ( + FunctionKind = reflect.TypeOf(Function{}).Name() + FunctionGroupKind = schema.GroupKind{Group: Group, Kind: FunctionKind}.String() + FunctionKindAPIVersion = FunctionKind + "." + SchemeGroupVersion.String() + FunctionGroupVersionKind = SchemeGroupVersion.WithKind(FunctionKind) +) + +// FunctionRevision type metadata. +var ( + FunctionRevisionKind = reflect.TypeOf(FunctionRevision{}).Name() + FunctionRevisionGroupKind = schema.GroupKind{Group: Group, Kind: FunctionRevisionKind}.String() + FunctionRevisionKindAPIVersion = FunctionRevisionKind + "." 
+ SchemeGroupVersion.String() + FunctionRevisionGroupVersionKind = SchemeGroupVersion.WithKind(FunctionRevisionKind) +) + func init() { SchemeBuilder.Register(&Configuration{}, &ConfigurationList{}) SchemeBuilder.Register(&ConfigurationRevision{}, &ConfigurationRevisionList{}) SchemeBuilder.Register(&Provider{}, &ProviderList{}) SchemeBuilder.Register(&ProviderRevision{}, &ProviderRevisionList{}) + SchemeBuilder.Register(&Function{}, &FunctionList{}) + SchemeBuilder.Register(&FunctionRevision{}, &FunctionRevisionList{}) } diff --git a/apis/pkg/v1/zz_generated.deepcopy.go b/apis/pkg/v1/zz_generated.deepcopy.go index b70272c9c..a9a86a0fb 100644 --- a/apis/pkg/v1/zz_generated.deepcopy.go +++ b/apis/pkg/v1/zz_generated.deepcopy.go @@ -208,6 +208,191 @@ func (in *ControllerReference) DeepCopy() *ControllerReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Function) DeepCopyInto(out *Function) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. +func (in *Function) DeepCopy() *Function { + if in == nil { + return nil + } + out := new(Function) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Function) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionList) DeepCopyInto(out *FunctionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Function, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionList. +func (in *FunctionList) DeepCopy() *FunctionList { + if in == nil { + return nil + } + out := new(FunctionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionRevision) DeepCopyInto(out *FunctionRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionRevision. +func (in *FunctionRevision) DeepCopy() *FunctionRevision { + if in == nil { + return nil + } + out := new(FunctionRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
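register.go above adds GroupVersionKind metadata for Function and FunctionRevision and registers both with the package's SchemeBuilder. A minimal sketch of wiring that into a runtime.Scheme; it assumes the package exposes the conventional AddToScheme helper alongside SchemeBuilder, which is not shown in this patch.

```go
package main

import (
	"fmt"
	"log"

	"k8s.io/apimachinery/pkg/runtime"

	pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1"
)

func main() {
	s := runtime.NewScheme()

	// AddToScheme is the conventional helper derived from SchemeBuilder.
	if err := pkgv1.AddToScheme(s); err != nil {
		log.Fatal(err)
	}

	// The scheme now recognizes the Function kind registered by this patch.
	fmt.Println(s.Recognizes(pkgv1.FunctionGroupVersionKind))
}
```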
+func (in *FunctionRevisionList) DeepCopyInto(out *FunctionRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FunctionRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionRevisionList. +func (in *FunctionRevisionList) DeepCopy() *FunctionRevisionList { + if in == nil { + return nil + } + out := new(FunctionRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionRevisionSpec) DeepCopyInto(out *FunctionRevisionSpec) { + *out = *in + in.PackageRevisionSpec.DeepCopyInto(&out.PackageRevisionSpec) + in.PackageRevisionRuntimeSpec.DeepCopyInto(&out.PackageRevisionRuntimeSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionRevisionSpec. +func (in *FunctionRevisionSpec) DeepCopy() *FunctionRevisionSpec { + if in == nil { + return nil + } + out := new(FunctionRevisionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionRevisionStatus) DeepCopyInto(out *FunctionRevisionStatus) { + *out = *in + in.PackageRevisionStatus.DeepCopyInto(&out.PackageRevisionStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionRevisionStatus. +func (in *FunctionRevisionStatus) DeepCopy() *FunctionRevisionStatus { + if in == nil { + return nil + } + out := new(FunctionRevisionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { + *out = *in + in.PackageSpec.DeepCopyInto(&out.PackageSpec) + in.PackageRuntimeSpec.DeepCopyInto(&out.PackageRuntimeSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. +func (in *FunctionSpec) DeepCopy() *FunctionSpec { + if in == nil { + return nil + } + out := new(FunctionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionStatus) DeepCopyInto(out *FunctionStatus) { + *out = *in + in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) + out.PackageStatus = in.PackageStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionStatus. +func (in *FunctionStatus) DeepCopy() *FunctionStatus { + if in == nil { + return nil + } + out := new(FunctionStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
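The generated deepcopy functions above exist so controllers can mutate copies of cached objects safely. A small sketch using the Function accessors added earlier in this patch; the label values are illustrative.

```go
package main

import (
	"fmt"

	pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1"
)

func main() {
	orig := &pkgv1.Function{}
	orig.SetCommonLabels(map[string]string{"team": "platform"})

	// DeepCopy returns an independent object, so mutating the copy leaves the
	// original (for example, an informer cache entry) untouched.
	cp := orig.DeepCopy()
	cp.SetCommonLabels(map[string]string{"team": "app"})

	fmt.Println(orig.GetCommonLabels()["team"], cp.GetCommonLabels()["team"])
}
```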
func (in *PackageRevisionRuntimeSpec) DeepCopyInto(out *PackageRevisionRuntimeSpec) { *out = *in diff --git a/apis/pkg/v1beta1/function_interfaces.go b/apis/pkg/v1beta1/function_interfaces.go deleted file mode 100644 index 7e99faf1c..000000000 --- a/apis/pkg/v1beta1/function_interfaces.go +++ /dev/null @@ -1,334 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - - v1 "github.com/crossplane/crossplane/apis/pkg/v1" -) - -// GetCondition of this Function. -func (f *Function) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return f.Status.GetCondition(ct) -} - -// SetConditions of this Function. -func (f *Function) SetConditions(c ...xpv1.Condition) { - f.Status.SetConditions(c...) -} - -// CleanConditions removes all conditions. -func (f *Function) CleanConditions() { - f.Status.Conditions = []xpv1.Condition{} -} - -// GetSource of this Function. -func (f *Function) GetSource() string { - return f.Spec.Package -} - -// SetSource of this Function. -func (f *Function) SetSource(s string) { - f.Spec.Package = s -} - -// GetActivationPolicy of this Function. -func (f *Function) GetActivationPolicy() *v1.RevisionActivationPolicy { - return f.Spec.RevisionActivationPolicy -} - -// SetActivationPolicy of this Function. -func (f *Function) SetActivationPolicy(a *v1.RevisionActivationPolicy) { - f.Spec.RevisionActivationPolicy = a -} - -// GetPackagePullSecrets of this Function. -func (f *Function) GetPackagePullSecrets() []corev1.LocalObjectReference { - return f.Spec.PackagePullSecrets -} - -// SetPackagePullSecrets of this Function. -func (f *Function) SetPackagePullSecrets(s []corev1.LocalObjectReference) { - f.Spec.PackagePullSecrets = s -} - -// GetPackagePullPolicy of this Function. -func (f *Function) GetPackagePullPolicy() *corev1.PullPolicy { - return f.Spec.PackagePullPolicy -} - -// SetPackagePullPolicy of this Function. -func (f *Function) SetPackagePullPolicy(i *corev1.PullPolicy) { - f.Spec.PackagePullPolicy = i -} - -// GetRevisionHistoryLimit of this Function. -func (f *Function) GetRevisionHistoryLimit() *int64 { - return f.Spec.RevisionHistoryLimit -} - -// SetRevisionHistoryLimit of this Function. -func (f *Function) SetRevisionHistoryLimit(l *int64) { - f.Spec.RevisionHistoryLimit = l -} - -// GetIgnoreCrossplaneConstraints of this Function. -func (f *Function) GetIgnoreCrossplaneConstraints() *bool { - return f.Spec.IgnoreCrossplaneConstraints -} - -// SetIgnoreCrossplaneConstraints of this Function. -func (f *Function) SetIgnoreCrossplaneConstraints(b *bool) { - f.Spec.IgnoreCrossplaneConstraints = b -} - -// GetControllerConfigRef of this Function. -func (f *Function) GetControllerConfigRef() *v1.ControllerConfigReference { - return nil -} - -// SetControllerConfigRef of this Function. -func (f *Function) SetControllerConfigRef(*v1.ControllerConfigReference) {} - -// GetRuntimeConfigRef of this Function. 
-func (f *Function) GetRuntimeConfigRef() *v1.RuntimeConfigReference { - return f.Spec.RuntimeConfigReference -} - -// SetRuntimeConfigRef of this Function. -func (f *Function) SetRuntimeConfigRef(r *v1.RuntimeConfigReference) { - f.Spec.RuntimeConfigReference = r -} - -// GetCurrentRevision of this Function. -func (f *Function) GetCurrentRevision() string { - return f.Status.CurrentRevision -} - -// SetCurrentRevision of this Function. -func (f *Function) SetCurrentRevision(s string) { - f.Status.CurrentRevision = s -} - -// GetSkipDependencyResolution of this Function. -func (f *Function) GetSkipDependencyResolution() *bool { - return f.Spec.SkipDependencyResolution -} - -// SetSkipDependencyResolution of this Function. -func (f *Function) SetSkipDependencyResolution(b *bool) { - f.Spec.SkipDependencyResolution = b -} - -// GetCurrentIdentifier of this Function. -func (f *Function) GetCurrentIdentifier() string { - return f.Status.CurrentIdentifier -} - -// SetCurrentIdentifier of this Function. -func (f *Function) SetCurrentIdentifier(s string) { - f.Status.CurrentIdentifier = s -} - -// GetCommonLabels of this Function. -func (f *Function) GetCommonLabels() map[string]string { - return f.Spec.CommonLabels -} - -// SetCommonLabels of this Function. -func (f *Function) SetCommonLabels(l map[string]string) { - f.Spec.CommonLabels = l -} - -// GetTLSServerSecretName of this Function. -func (f *Function) GetTLSServerSecretName() *string { - return v1.GetSecretNameWithSuffix(f.GetName(), v1.TLSServerSecretNameSuffix) -} - -// GetTLSClientSecretName of this Function. -func (f *Function) GetTLSClientSecretName() *string { - return nil -} - -// GetCondition of this FunctionRevision. -func (r *FunctionRevision) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return r.Status.GetCondition(ct) -} - -// SetConditions of this FunctionRevision. -func (r *FunctionRevision) SetConditions(c ...xpv1.Condition) { - r.Status.SetConditions(c...) -} - -// CleanConditions removes all conditions. -func (r *FunctionRevision) CleanConditions() { - r.Status.Conditions = []xpv1.Condition{} -} - -// GetObjects of this FunctionRevision. -func (r *FunctionRevision) GetObjects() []xpv1.TypedReference { - return r.Status.ObjectRefs -} - -// SetObjects of this FunctionRevision. -func (r *FunctionRevision) SetObjects(c []xpv1.TypedReference) { - r.Status.ObjectRefs = c -} - -// GetSource of this FunctionRevision. -func (r *FunctionRevision) GetSource() string { - return r.Spec.Package -} - -// SetSource of this FunctionRevision. -func (r *FunctionRevision) SetSource(s string) { - r.Spec.Package = s -} - -// GetPackagePullSecrets of this FunctionRevision. -func (r *FunctionRevision) GetPackagePullSecrets() []corev1.LocalObjectReference { - return r.Spec.PackagePullSecrets -} - -// SetPackagePullSecrets of this FunctionRevision. -func (r *FunctionRevision) SetPackagePullSecrets(s []corev1.LocalObjectReference) { - r.Spec.PackagePullSecrets = s -} - -// GetPackagePullPolicy of this FunctionRevision. -func (r *FunctionRevision) GetPackagePullPolicy() *corev1.PullPolicy { - return r.Spec.PackagePullPolicy -} - -// SetPackagePullPolicy of this FunctionRevision. -func (r *FunctionRevision) SetPackagePullPolicy(i *corev1.PullPolicy) { - r.Spec.PackagePullPolicy = i -} - -// GetDesiredState of this FunctionRevision. -func (r *FunctionRevision) GetDesiredState() v1.PackageRevisionDesiredState { - return r.Spec.DesiredState -} - -// SetDesiredState of this FunctionRevision. 
-func (r *FunctionRevision) SetDesiredState(s v1.PackageRevisionDesiredState) { - r.Spec.DesiredState = s -} - -// GetRevision of this FunctionRevision. -func (r *FunctionRevision) GetRevision() int64 { - return r.Spec.Revision -} - -// SetRevision of this FunctionRevision. -func (r *FunctionRevision) SetRevision(rev int64) { - r.Spec.Revision = rev -} - -// GetDependencyStatus of this v. -func (r *FunctionRevision) GetDependencyStatus() (found, installed, invalid int64) { - return r.Status.FoundDependencies, r.Status.InstalledDependencies, r.Status.InvalidDependencies -} - -// SetDependencyStatus of this FunctionRevision. -func (r *FunctionRevision) SetDependencyStatus(found, installed, invalid int64) { - r.Status.FoundDependencies = found - r.Status.InstalledDependencies = installed - r.Status.InvalidDependencies = invalid -} - -// GetIgnoreCrossplaneConstraints of this FunctionRevision. -func (r *FunctionRevision) GetIgnoreCrossplaneConstraints() *bool { - return r.Spec.IgnoreCrossplaneConstraints -} - -// SetIgnoreCrossplaneConstraints of this FunctionRevision. -func (r *FunctionRevision) SetIgnoreCrossplaneConstraints(b *bool) { - r.Spec.IgnoreCrossplaneConstraints = b -} - -// GetControllerConfigRef of this FunctionRevision. -func (r *FunctionRevision) GetControllerConfigRef() *v1.ControllerConfigReference { - return r.Spec.ControllerConfigReference -} - -// SetControllerConfigRef of this FunctionRevision. -func (r *FunctionRevision) SetControllerConfigRef(ref *v1.ControllerConfigReference) { - r.Spec.ControllerConfigReference = ref -} - -// GetRuntimeConfigRef of this FunctionRevision. -func (r *FunctionRevision) GetRuntimeConfigRef() *v1.RuntimeConfigReference { - return r.Spec.RuntimeConfigReference -} - -// SetRuntimeConfigRef of this FunctionRevision. -func (r *FunctionRevision) SetRuntimeConfigRef(ref *v1.RuntimeConfigReference) { - r.Spec.RuntimeConfigReference = ref -} - -// GetSkipDependencyResolution of this FunctionRevision. -func (r *FunctionRevision) GetSkipDependencyResolution() *bool { - return r.Spec.SkipDependencyResolution -} - -// SetSkipDependencyResolution of this FunctionRevision. -func (r *FunctionRevision) SetSkipDependencyResolution(b *bool) { - r.Spec.SkipDependencyResolution = b -} - -// GetTLSServerSecretName of this FunctionRevision. -func (r *FunctionRevision) GetTLSServerSecretName() *string { - return r.Spec.TLSServerSecretName -} - -// SetTLSServerSecretName of this FunctionRevision. -func (r *FunctionRevision) SetTLSServerSecretName(s *string) { - r.Spec.TLSServerSecretName = s -} - -// GetTLSClientSecretName of this FunctionRevision. -func (r *FunctionRevision) GetTLSClientSecretName() *string { - return r.Spec.TLSClientSecretName -} - -// SetTLSClientSecretName of this FunctionRevision. -func (r *FunctionRevision) SetTLSClientSecretName(s *string) { - r.Spec.TLSClientSecretName = s -} - -// GetCommonLabels of this FunctionRevision. -func (r *FunctionRevision) GetCommonLabels() map[string]string { - return r.Spec.CommonLabels -} - -// SetCommonLabels of this FunctionRevision. -func (r *FunctionRevision) SetCommonLabels(l map[string]string) { - r.Spec.CommonLabels = l -} - -// GetRevisions of this ConfigurationRevisionList. 
-func (p *FunctionRevisionList) GetRevisions() []v1.PackageRevision { - prs := make([]v1.PackageRevision, len(p.Items)) - for i, r := range p.Items { - prs[i] = &r - } - return prs -} diff --git a/apis/pkg/v1beta1/function_interfaces_test.go b/apis/pkg/v1beta1/function_interfaces_test.go deleted file mode 100644 index 02bb84324..000000000 --- a/apis/pkg/v1beta1/function_interfaces_test.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import v1 "github.com/crossplane/crossplane/apis/pkg/v1" - -var ( - _ v1.Package = &Function{} - _ v1.PackageRevision = &FunctionRevision{} - _ v1.PackageRevisionList = &FunctionRevisionList{} -) diff --git a/apis/pkg/v1beta1/zz_generated.deepcopy.go b/apis/pkg/v1beta1/zz_generated.deepcopy.go index 449104538..90cfe44fb 100644 --- a/apis/pkg/v1beta1/zz_generated.deepcopy.go +++ b/apis/pkg/v1beta1/zz_generated.deepcopy.go @@ -21,10 +21,43 @@ limitations under the License. package v1beta1 import ( + commonv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerConfigReference) DeepCopyInto(out *ControllerConfigReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfigReference. +func (in *ControllerConfigReference) DeepCopy() *ControllerConfigReference { + if in == nil { + return nil + } + out := new(ControllerConfigReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerReference) DeepCopyInto(out *ControllerReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerReference. +func (in *ControllerReference) DeepCopy() *ControllerReference { + if in == nil { + return nil + } + out := new(ControllerReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Dependency) DeepCopyInto(out *Dependency) { *out = *in @@ -456,6 +489,219 @@ func (in *ObjectMeta) DeepCopy() *ObjectMeta { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PackageRevisionRuntimeSpec) DeepCopyInto(out *PackageRevisionRuntimeSpec) { + *out = *in + in.PackageRuntimeSpec.DeepCopyInto(&out.PackageRuntimeSpec) + if in.TLSServerSecretName != nil { + in, out := &in.TLSServerSecretName, &out.TLSServerSecretName + *out = new(string) + **out = **in + } + if in.TLSClientSecretName != nil { + in, out := &in.TLSClientSecretName, &out.TLSClientSecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRevisionRuntimeSpec. +func (in *PackageRevisionRuntimeSpec) DeepCopy() *PackageRevisionRuntimeSpec { + if in == nil { + return nil + } + out := new(PackageRevisionRuntimeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageRevisionSpec) DeepCopyInto(out *PackageRevisionSpec) { + *out = *in + if in.PackagePullSecrets != nil { + in, out := &in.PackagePullSecrets, &out.PackagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.PackagePullPolicy != nil { + in, out := &in.PackagePullPolicy, &out.PackagePullPolicy + *out = new(corev1.PullPolicy) + **out = **in + } + if in.IgnoreCrossplaneConstraints != nil { + in, out := &in.IgnoreCrossplaneConstraints, &out.IgnoreCrossplaneConstraints + *out = new(bool) + **out = **in + } + if in.SkipDependencyResolution != nil { + in, out := &in.SkipDependencyResolution, &out.SkipDependencyResolution + *out = new(bool) + **out = **in + } + if in.CommonLabels != nil { + in, out := &in.CommonLabels, &out.CommonLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRevisionSpec. +func (in *PackageRevisionSpec) DeepCopy() *PackageRevisionSpec { + if in == nil { + return nil + } + out := new(PackageRevisionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageRevisionStatus) DeepCopyInto(out *PackageRevisionStatus) { + *out = *in + in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) + if in.ObjectRefs != nil { + in, out := &in.ObjectRefs, &out.ObjectRefs + *out = make([]commonv1.TypedReference, len(*in)) + copy(*out, *in) + } + if in.PermissionRequests != nil { + in, out := &in.PermissionRequests, &out.PermissionRequests + *out = make([]rbacv1.PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRevisionStatus. +func (in *PackageRevisionStatus) DeepCopy() *PackageRevisionStatus { + if in == nil { + return nil + } + out := new(PackageRevisionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PackageRuntimeSpec) DeepCopyInto(out *PackageRuntimeSpec) { + *out = *in + if in.ControllerConfigReference != nil { + in, out := &in.ControllerConfigReference, &out.ControllerConfigReference + *out = new(ControllerConfigReference) + **out = **in + } + if in.RuntimeConfigReference != nil { + in, out := &in.RuntimeConfigReference, &out.RuntimeConfigReference + *out = new(RuntimeConfigReference) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRuntimeSpec. +func (in *PackageRuntimeSpec) DeepCopy() *PackageRuntimeSpec { + if in == nil { + return nil + } + out := new(PackageRuntimeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageSpec) DeepCopyInto(out *PackageSpec) { + *out = *in + if in.RevisionActivationPolicy != nil { + in, out := &in.RevisionActivationPolicy, &out.RevisionActivationPolicy + *out = new(RevisionActivationPolicy) + **out = **in + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int64) + **out = **in + } + if in.PackagePullSecrets != nil { + in, out := &in.PackagePullSecrets, &out.PackagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.PackagePullPolicy != nil { + in, out := &in.PackagePullPolicy, &out.PackagePullPolicy + *out = new(corev1.PullPolicy) + **out = **in + } + if in.IgnoreCrossplaneConstraints != nil { + in, out := &in.IgnoreCrossplaneConstraints, &out.IgnoreCrossplaneConstraints + *out = new(bool) + **out = **in + } + if in.SkipDependencyResolution != nil { + in, out := &in.SkipDependencyResolution, &out.SkipDependencyResolution + *out = new(bool) + **out = **in + } + if in.CommonLabels != nil { + in, out := &in.CommonLabels, &out.CommonLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageSpec. +func (in *PackageSpec) DeepCopy() *PackageSpec { + if in == nil { + return nil + } + out := new(PackageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageStatus) DeepCopyInto(out *PackageStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageStatus. +func (in *PackageStatus) DeepCopy() *PackageStatus { + if in == nil { + return nil + } + out := new(PackageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeConfigReference) DeepCopyInto(out *RuntimeConfigReference) { + *out = *in + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeConfigReference. 
+func (in *RuntimeConfigReference) DeepCopy() *RuntimeConfigReference { + if in == nil { + return nil + } + out := new(RuntimeConfigReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceAccountTemplate) DeepCopyInto(out *ServiceAccountTemplate) { *out = *in diff --git a/apis/pkg/v1beta1/zz_generated.function_types.go b/apis/pkg/v1beta1/zz_generated.function_types.go new file mode 100644 index 000000000..c4d244d7f --- /dev/null +++ b/apis/pkg/v1beta1/zz_generated.function_types.go @@ -0,0 +1,120 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generated from pkg/v1/function_types.go by ../hack/duplicate_api_type.sh. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +// +kubebuilder:object:root=true +// +genclient +// +genclient:nonNamespaced + +// A Function installs an OCI compatible Crossplane package, extending +// Crossplane with support for a new kind of composition function. +// +// Read the Crossplane documentation for +// [more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions). +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="INSTALLED",type="string",JSONPath=".status.conditions[?(@.type=='Installed')].status" +// +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" +// +kubebuilder:printcolumn:name="PACKAGE",type="string",JSONPath=".spec.package" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,pkg} +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FunctionSpec `json:"spec,omitempty"` + Status FunctionStatus `json:"status,omitempty"` +} + +// FunctionSpec specifies the configuration of a Function. +type FunctionSpec struct { + PackageSpec `json:",inline"` + + PackageRuntimeSpec `json:",inline"` +} + +// FunctionStatus represents the observed state of a Function. +type FunctionStatus struct { + xpv1.ConditionedStatus `json:",inline"` + PackageStatus `json:",inline"` +} + +// +kubebuilder:object:root=true + +// FunctionList contains a list of Function. +type FunctionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Function `json:"items"` +} + +// FunctionRevisionSpec specifies configuration for a FunctionRevision. +type FunctionRevisionSpec struct { + PackageRevisionSpec `json:",inline"` + PackageRevisionRuntimeSpec `json:",inline"` +} + +// +kubebuilder:object:root=true +// +genclient +// +genclient:nonNamespaced + +// A FunctionRevision represents a revision of a Function. Crossplane +// creates new revisions when there are changes to the Function. 
+// +// Crossplane creates and manages FunctionRevisions. Don't directly edit +// FunctionRevisions. +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" +// +kubebuilder:printcolumn:name="REVISION",type="string",JSONPath=".spec.revision" +// +kubebuilder:printcolumn:name="IMAGE",type="string",JSONPath=".spec.image" +// +kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".spec.desiredState" +// +kubebuilder:printcolumn:name="DEP-FOUND",type="string",JSONPath=".status.foundDependencies" +// +kubebuilder:printcolumn:name="DEP-INSTALLED",type="string",JSONPath=".status.installedDependencies" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,pkgrev} +type FunctionRevision struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FunctionRevisionSpec `json:"spec,omitempty"` + Status FunctionRevisionStatus `json:"status,omitempty"` +} + +// FunctionRevisionStatus represents the observed state of a FunctionRevision. +type FunctionRevisionStatus struct { + PackageRevisionStatus `json:",inline"` + + // Endpoint is the gRPC endpoint where Crossplane will send + // RunFunctionRequests. + Endpoint string `json:"endpoint,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionRevisionList contains a list of FunctionRevision. +type FunctionRevisionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FunctionRevision `json:"items"` +} diff --git a/apis/pkg/v1beta1/zz_generated.package_runtime_types.go b/apis/pkg/v1beta1/zz_generated.package_runtime_types.go new file mode 100644 index 000000000..17134004c --- /dev/null +++ b/apis/pkg/v1beta1/zz_generated.package_runtime_types.go @@ -0,0 +1,73 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generated from pkg/v1/package_runtime_types.go by ../hack/duplicate_api_type.sh. DO NOT EDIT. + +package v1beta1 + +// PackageRuntimeSpec specifies configuration for the runtime of a package. +// Only used by packages that uses a runtime, i.e. by providers and functions +// but not for configurations. +type PackageRuntimeSpec struct { + // ControllerConfigRef references a ControllerConfig resource that will be + // used to configure the packaged controller Deployment. + // Deprecated: Use RuntimeConfigReference instead. + // +optional + ControllerConfigReference *ControllerConfigReference `json:"controllerConfigRef,omitempty"` + // RuntimeConfigRef references a RuntimeConfig resource that will be used + // to configure the package runtime. + // +optional + // +kubebuilder:default={"name": "default"} + RuntimeConfigReference *RuntimeConfigReference `json:"runtimeConfigRef,omitempty"` +} + +// PackageRevisionRuntimeSpec specifies configuration for the runtime of a +// package revision. 
Only used by packages that uses a runtime, i.e. by +// providers and functions but not for configurations. +type PackageRevisionRuntimeSpec struct { + PackageRuntimeSpec `json:",inline"` + // TLSServerSecretName is the name of the TLS Secret that stores server + // certificates of the Provider. + // +optional + TLSServerSecretName *string `json:"tlsServerSecretName,omitempty"` + + // TLSClientSecretName is the name of the TLS Secret that stores client + // certificates of the Provider. + // +optional + TLSClientSecretName *string `json:"tlsClientSecretName,omitempty"` +} + +// A ControllerConfigReference to a ControllerConfig resource that will be used +// to configure the packaged controller Deployment. +type ControllerConfigReference struct { + // Name of the ControllerConfig. + Name string `json:"name"` +} + +// A RuntimeConfigReference to a runtime config resource that will be used +// to configure the package runtime. +type RuntimeConfigReference struct { + // API version of the referent. + // +optional + // +kubebuilder:default="pkg.crossplane.io/v1beta1" + APIVersion *string `json:"apiVersion,omitempty"` + // Kind of the referent. + // +optional + // +kubebuilder:default="DeploymentRuntimeConfig" + Kind *string `json:"kind,omitempty"` + // Name of the RuntimeConfig. + Name string `json:"name"` +} diff --git a/apis/pkg/v1beta1/zz_generated.package_types.go b/apis/pkg/v1beta1/zz_generated.package_types.go new file mode 100644 index 000000000..aad03a3bd --- /dev/null +++ b/apis/pkg/v1beta1/zz_generated.package_types.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generated from pkg/v1/package_types.go by ../hack/duplicate_api_type.sh. DO NOT EDIT. + +package v1beta1 + +import corev1 "k8s.io/api/core/v1" + +// RevisionActivationPolicy indicates how a package should activate its +// revisions. +type RevisionActivationPolicy string + +// PackageSpec specifies the desired state of a Package. +type PackageSpec struct { + // Package is the name of the package that is being requested. + Package string `json:"package"` + + // RevisionActivationPolicy specifies how the package controller should + // update from one revision to the next. Options are Automatic or Manual. + // Default is Automatic. + // +optional + // +kubebuilder:default=Automatic + RevisionActivationPolicy *RevisionActivationPolicy `json:"revisionActivationPolicy,omitempty"` + + // RevisionHistoryLimit dictates how the package controller cleans up old + // inactive package revisions. + // Defaults to 1. Can be disabled by explicitly setting to 0. + // +optional + // +kubebuilder:default=1 + RevisionHistoryLimit *int64 `json:"revisionHistoryLimit,omitempty"` + + // PackagePullSecrets are named secrets in the same namespace that can be used + // to fetch packages from private registries. + // +optional + PackagePullSecrets []corev1.LocalObjectReference `json:"packagePullSecrets,omitempty"` + + // PackagePullPolicy defines the pull policy for the package. 
+ // Default is IfNotPresent. + // +optional + // +kubebuilder:default=IfNotPresent + PackagePullPolicy *corev1.PullPolicy `json:"packagePullPolicy,omitempty"` + + // IgnoreCrossplaneConstraints indicates to the package manager whether to + // honor Crossplane version constrains specified by the package. + // Default is false. + // +optional + // +kubebuilder:default=false + IgnoreCrossplaneConstraints *bool `json:"ignoreCrossplaneConstraints,omitempty"` + + // SkipDependencyResolution indicates to the package manager whether to skip + // resolving dependencies for a package. Setting this value to true may have + // unintended consequences. + // Default is false. + // +optional + // +kubebuilder:default=false + SkipDependencyResolution *bool `json:"skipDependencyResolution,omitempty"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // +optional + CommonLabels map[string]string `json:"commonLabels,omitempty"` +} + +// PackageStatus represents the observed state of a Package. +type PackageStatus struct { + // CurrentRevision is the name of the current package revision. It will + // reflect the most up to date revision, whether it has been activated or + // not. + CurrentRevision string `json:"currentRevision,omitempty"` + + // CurrentIdentifier is the most recent package source that was used to + // produce a revision. The package manager uses this field to determine + // whether to check for package updates for a given source when + // packagePullPolicy is set to IfNotPresent. Manually removing this field + // will cause the package manager to check that the current revision is + // correct for the given package source. + CurrentIdentifier string `json:"currentIdentifier,omitempty"` +} diff --git a/apis/pkg/v1beta1/zz_generated.revision_types.go b/apis/pkg/v1beta1/zz_generated.revision_types.go new file mode 100644 index 000000000..6fb2135e0 --- /dev/null +++ b/apis/pkg/v1beta1/zz_generated.revision_types.go @@ -0,0 +1,111 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generated from pkg/v1/revision_types.go by ../hack/duplicate_api_type.sh. DO NOT EDIT. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +// PackageRevisionDesiredState is the desired state of the package revision. +type PackageRevisionDesiredState string + +const ( + // PackageRevisionActive is an active package revision. + PackageRevisionActive PackageRevisionDesiredState = "Active" + + // PackageRevisionInactive is an inactive package revision. + PackageRevisionInactive PackageRevisionDesiredState = "Inactive" +) + +// PackageRevisionSpec specifies the desired state of a PackageRevision. 
+type PackageRevisionSpec struct { + // DesiredState of the PackageRevision. Can be either Active or Inactive. + DesiredState PackageRevisionDesiredState `json:"desiredState"` + + // Package image used by install Pod to extract package contents. + Package string `json:"image"` + + // PackagePullSecrets are named secrets in the same namespace that can be + // used to fetch packages from private registries. They are also applied to + // any images pulled for the package, such as a provider's controller image. + // +optional + PackagePullSecrets []corev1.LocalObjectReference `json:"packagePullSecrets,omitempty"` + + // PackagePullPolicy defines the pull policy for the package. It is also + // applied to any images pulled for the package, such as a provider's + // controller image. + // Default is IfNotPresent. + // +optional + // +kubebuilder:default=IfNotPresent + PackagePullPolicy *corev1.PullPolicy `json:"packagePullPolicy,omitempty"` + + // Revision number. Indicates when the revision will be garbage collected + // based on the parent's RevisionHistoryLimit. + Revision int64 `json:"revision"` + + // IgnoreCrossplaneConstraints indicates to the package manager whether to + // honor Crossplane version constrains specified by the package. + // Default is false. + // +optional + // +kubebuilder:default=false + IgnoreCrossplaneConstraints *bool `json:"ignoreCrossplaneConstraints,omitempty"` + + // SkipDependencyResolution indicates to the package manager whether to skip + // resolving dependencies for a package. Setting this value to true may have + // unintended consequences. + // Default is false. + // +optional + // +kubebuilder:default=false + SkipDependencyResolution *bool `json:"skipDependencyResolution,omitempty"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // +optional + CommonLabels map[string]string `json:"commonLabels,omitempty"` +} + +// PackageRevisionStatus represents the observed state of a PackageRevision. +type PackageRevisionStatus struct { + xpv1.ConditionedStatus `json:",inline"` + + // References to objects owned by PackageRevision. + ObjectRefs []xpv1.TypedReference `json:"objectRefs,omitempty"` + + // Dependency information. + FoundDependencies int64 `json:"foundDependencies,omitempty"` + InstalledDependencies int64 `json:"installedDependencies,omitempty"` + InvalidDependencies int64 `json:"invalidDependencies,omitempty"` + + // PermissionRequests made by this package. The package declares that its + // controller needs these permissions to run. The RBAC manager is + // responsible for granting them. + PermissionRequests []rbacv1.PolicyRule `json:"permissionRequests,omitempty"` +} + +// A ControllerReference references the controller (e.g. Deployment), if any, +// that is responsible for reconciling the types a package revision installs. +type ControllerReference struct { + // Name of the controller. + Name string `json:"name"` +} diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index 937ed7a48..22c60210f 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -588,17 +588,20 @@ spec: Mode controls what type or "mode" of Composition will be used. 
- "Resources" (the default) indicates that a Composition uses what is - commonly referred to as "Patch & Transform" or P&T composition. This mode - of Composition uses an array of resources, each a template for a composed - resource. + "Pipeline" indicates that a Composition specifies a pipeline of + Composition Functions, each of which is responsible for producing + composed resources that Crossplane should create or update. - "Pipeline" indicates that a Composition specifies a pipeline - of Composition Functions, each of which is responsible for producing - composed resources that Crossplane should create or update. THE PIPELINE - MODE IS A BETA FEATURE. It is not honored if the relevant Crossplane - feature flag is disabled. + "Resources" indicates that a Composition uses what is commonly referred + to as "Patch & Transform" or P&T composition. This mode of Composition + uses an array of resources, each a template for a composed resource. + + + All Compositions should use Pipeline mode. Resources mode is deprecated. + Resources mode won't be removed in Crossplane 1.x, and will remain the + default to avoid breaking legacy Compositions. However, it's no longer + accepting new features, and only accepting security related bug fixes. enum: - Resources - Pipeline @@ -612,6 +615,9 @@ spec: PatchSets are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- A PatchSet is a set of patches that can be reused from all resources within @@ -988,10 +994,6 @@ spec: The Pipeline is only used by the "Pipeline" mode of Composition. It is ignored by other modes. - - - THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - feature flag is disabled. items: description: A PipelineStep in a Composition Function pipeline. properties: @@ -1062,6 +1064,9 @@ spec: - step type: object type: array + x-kubernetes-list-map-keys: + - step + x-kubernetes-list-type: map publishConnectionDetailsWithStoreConfigRef: default: name: default @@ -1089,6 +1094,9 @@ spec: Resources are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- ComposedTemplate is used to provide information about how the composed resource @@ -2227,17 +2235,20 @@ spec: Mode controls what type or "mode" of Composition will be used. - "Resources" (the default) indicates that a Composition uses what is - commonly referred to as "Patch & Transform" or P&T composition. This mode - of Composition uses an array of resources, each a template for a composed - resource. + "Pipeline" indicates that a Composition specifies a pipeline of + Composition Functions, each of which is responsible for producing + composed resources that Crossplane should create or update. - "Pipeline" indicates that a Composition specifies a pipeline - of Composition Functions, each of which is responsible for producing - composed resources that Crossplane should create or update. THE PIPELINE - MODE IS A BETA FEATURE. It is not honored if the relevant Crossplane - feature flag is disabled. + "Resources" indicates that a Composition uses what is commonly referred + to as "Patch & Transform" or P&T composition. This mode of Composition + uses an array of resources, each a template for a composed resource. + + + All Compositions should use Pipeline mode. Resources mode is deprecated. 
+ Resources mode won't be removed in Crossplane 1.x, and will remain the + default to avoid breaking legacy Compositions. However, it's no longer + accepting new features, and only accepting security related bug fixes. enum: - Resources - Pipeline @@ -2251,6 +2262,9 @@ spec: PatchSets are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- A PatchSet is a set of patches that can be reused from all resources within @@ -2627,10 +2641,6 @@ spec: The Pipeline is only used by the "Pipeline" mode of Composition. It is ignored by other modes. - - - THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - feature flag is disabled. items: description: A PipelineStep in a Composition Function pipeline. properties: @@ -2701,6 +2711,9 @@ spec: - step type: object type: array + x-kubernetes-list-map-keys: + - step + x-kubernetes-list-type: map publishConnectionDetailsWithStoreConfigRef: default: name: default @@ -2728,6 +2741,9 @@ spec: Resources are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- ComposedTemplate is used to provide information about how the composed resource diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index 0cd320c31..d8fa40672 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -583,17 +583,20 @@ spec: Mode controls what type or "mode" of Composition will be used. - "Resources" (the default) indicates that a Composition uses what is - commonly referred to as "Patch & Transform" or P&T composition. This mode - of Composition uses an array of resources, each a template for a composed - resource. + "Pipeline" indicates that a Composition specifies a pipeline of + Composition Functions, each of which is responsible for producing + composed resources that Crossplane should create or update. - "Pipeline" indicates that a Composition specifies a pipeline - of Composition Functions, each of which is responsible for producing - composed resources that Crossplane should create or update. THE PIPELINE - MODE IS A BETA FEATURE. It is not honored if the relevant Crossplane - feature flag is disabled. + "Resources" indicates that a Composition uses what is commonly referred + to as "Patch & Transform" or P&T composition. This mode of Composition + uses an array of resources, each a template for a composed resource. + + + All Compositions should use Pipeline mode. Resources mode is deprecated. + Resources mode won't be removed in Crossplane 1.x, and will remain the + default to avoid breaking legacy Compositions. However, it's no longer + accepting new features, and only accepting security related bug fixes. enum: - Resources - Pipeline @@ -607,6 +610,9 @@ spec: PatchSets are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- A PatchSet is a set of patches that can be reused from all resources within @@ -983,10 +989,6 @@ spec: The Pipeline is only used by the "Pipeline" mode of Composition. It is ignored by other modes. - - - THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - feature flag is disabled. items: description: A PipelineStep in a Composition Function pipeline. 
properties: @@ -1087,6 +1089,9 @@ spec: Resources are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- ComposedTemplate is used to provide information about how the composed resource diff --git a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml index ef541371a..0de8d3141 100644 --- a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -39,7 +39,7 @@ spec: - jsonPath: .metadata.creationTimestamp name: AGE type: date - name: v1beta1 + name: v1 schema: openAPIV3Schema: description: |- @@ -333,3 +333,319 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Healthy')].status + name: HEALTHY + type: string + - jsonPath: .spec.revision + name: REVISION + type: string + - jsonPath: .spec.image + name: IMAGE + type: string + - jsonPath: .spec.desiredState + name: STATE + type: string + - jsonPath: .status.foundDependencies + name: DEP-FOUND + type: string + - jsonPath: .status.installedDependencies + name: DEP-INSTALLED + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: |- + A FunctionRevision represents a revision of a Function. Crossplane + creates new revisions when there are changes to the Function. + + + Crossplane creates and manages FunctionRevisions. Don't directly edit + FunctionRevisions. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionRevisionSpec specifies configuration for a FunctionRevision. + properties: + commonLabels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + type: object + controllerConfigRef: + description: |- + ControllerConfigRef references a ControllerConfig resource that will be + used to configure the packaged controller Deployment. + Deprecated: Use RuntimeConfigReference instead. + properties: + name: + description: Name of the ControllerConfig. + type: string + required: + - name + type: object + desiredState: + description: DesiredState of the PackageRevision. Can be either Active + or Inactive. + type: string + ignoreCrossplaneConstraints: + default: false + description: |- + IgnoreCrossplaneConstraints indicates to the package manager whether to + honor Crossplane version constrains specified by the package. + Default is false. 
+ type: boolean + image: + description: Package image used by install Pod to extract package + contents. + type: string + packagePullPolicy: + default: IfNotPresent + description: |- + PackagePullPolicy defines the pull policy for the package. It is also + applied to any images pulled for the package, such as a provider's + controller image. + Default is IfNotPresent. + type: string + packagePullSecrets: + description: |- + PackagePullSecrets are named secrets in the same namespace that can be + used to fetch packages from private registries. They are also applied to + any images pulled for the package, such as a provider's controller image. + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + type: array + revision: + description: |- + Revision number. Indicates when the revision will be garbage collected + based on the parent's RevisionHistoryLimit. + format: int64 + type: integer + runtimeConfigRef: + default: + name: default + description: |- + RuntimeConfigRef references a RuntimeConfig resource that will be used + to configure the package runtime. + properties: + apiVersion: + default: pkg.crossplane.io/v1beta1 + description: API version of the referent. + type: string + kind: + default: DeploymentRuntimeConfig + description: Kind of the referent. + type: string + name: + description: Name of the RuntimeConfig. + type: string + required: + - name + type: object + skipDependencyResolution: + default: false + description: |- + SkipDependencyResolution indicates to the package manager whether to skip + resolving dependencies for a package. Setting this value to true may have + unintended consequences. + Default is false. + type: boolean + tlsClientSecretName: + description: |- + TLSClientSecretName is the name of the TLS Secret that stores client + certificates of the Provider. + type: string + tlsServerSecretName: + description: |- + TLSServerSecretName is the name of the TLS Secret that stores server + certificates of the Provider. + type: string + required: + - desiredState + - image + - revision + type: object + status: + description: FunctionRevisionStatus represents the observed state of a + FunctionRevision. + properties: + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + endpoint: + description: |- + Endpoint is the gRPC endpoint where Crossplane will send + RunFunctionRequests. + type: string + foundDependencies: + description: Dependency information. + format: int64 + type: integer + installedDependencies: + format: int64 + type: integer + invalidDependencies: + format: int64 + type: integer + objectRefs: + description: References to objects owned by PackageRevision. + items: + description: |- + A TypedReference refers to an object by Name, Kind, and APIVersion. It is + commonly used to reference cluster-scoped objects or objects where the + namespace is already known. + properties: + apiVersion: + description: APIVersion of the referenced object. + type: string + kind: + description: Kind of the referenced object. + type: string + name: + description: Name of the referenced object. + type: string + uid: + description: UID of the referenced object. + type: string + required: + - apiVersion + - kind + - name + type: object + type: array + permissionRequests: + description: |- + PermissionRequests made by this package. The package declares that its + controller needs these permissions to run. The RBAC manager is + responsible for granting them. + items: + description: |- + PolicyRule holds information that describes a policy rule, but does not contain information + about who the rule applies to or which namespace the rule applies to. + properties: + apiGroups: + description: |- + APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. + items: + type: string + type: array + x-kubernetes-list-type: atomic + nonResourceURLs: + description: |- + NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + items: + type: string + type: array + x-kubernetes-list-type: atomic + resourceNames: + description: ResourceNames is an optional white list of names + that the rule applies to. An empty set means that everything + is allowed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + resources: + description: Resources is a list of resources this rule applies + to. '*' represents all resources. + items: + type: string + type: array + x-kubernetes-list-type: atomic + verbs: + description: Verbs is a list of Verbs that apply to ALL the + ResourceKinds contained in this rule. '*' represents all verbs. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - verbs + type: object + type: array + type: object + type: object + served: true + storage: false + subresources: + status: {} diff --git a/cluster/crds/pkg.crossplane.io_functions.yaml b/cluster/crds/pkg.crossplane.io_functions.yaml index e0a43d03a..e6183b3e9 100644 --- a/cluster/crds/pkg.crossplane.io_functions.yaml +++ b/cluster/crds/pkg.crossplane.io_functions.yaml @@ -30,7 +30,7 @@ spec: - jsonPath: .metadata.creationTimestamp name: AGE type: date - name: v1beta1 + name: v1 schema: openAPIV3Schema: description: |- @@ -233,3 +233,219 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Installed')].status + name: INSTALLED + type: string + - jsonPath: .status.conditions[?(@.type=='Healthy')].status + name: HEALTHY + type: string + - jsonPath: .spec.package + name: PACKAGE + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: |- + A Function installs an OCI compatible Crossplane package, extending + Crossplane with support for a new kind of composition function. + + + Read the Crossplane documentation for + [more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionSpec specifies the configuration of a Function. + properties: + commonLabels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + type: object + controllerConfigRef: + description: |- + ControllerConfigRef references a ControllerConfig resource that will be + used to configure the packaged controller Deployment. + Deprecated: Use RuntimeConfigReference instead. + properties: + name: + description: Name of the ControllerConfig. + type: string + required: + - name + type: object + ignoreCrossplaneConstraints: + default: false + description: |- + IgnoreCrossplaneConstraints indicates to the package manager whether to + honor Crossplane version constrains specified by the package. + Default is false. + type: boolean + package: + description: Package is the name of the package that is being requested. + type: string + packagePullPolicy: + default: IfNotPresent + description: |- + PackagePullPolicy defines the pull policy for the package. + Default is IfNotPresent. + type: string + packagePullSecrets: + description: |- + PackagePullSecrets are named secrets in the same namespace that can be used + to fetch packages from private registries. 
+ items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + type: array + revisionActivationPolicy: + default: Automatic + description: |- + RevisionActivationPolicy specifies how the package controller should + update from one revision to the next. Options are Automatic or Manual. + Default is Automatic. + type: string + revisionHistoryLimit: + default: 1 + description: |- + RevisionHistoryLimit dictates how the package controller cleans up old + inactive package revisions. + Defaults to 1. Can be disabled by explicitly setting to 0. + format: int64 + type: integer + runtimeConfigRef: + default: + name: default + description: |- + RuntimeConfigRef references a RuntimeConfig resource that will be used + to configure the package runtime. + properties: + apiVersion: + default: pkg.crossplane.io/v1beta1 + description: API version of the referent. + type: string + kind: + default: DeploymentRuntimeConfig + description: Kind of the referent. + type: string + name: + description: Name of the RuntimeConfig. + type: string + required: + - name + type: object + skipDependencyResolution: + default: false + description: |- + SkipDependencyResolution indicates to the package manager whether to skip + resolving dependencies for a package. Setting this value to true may have + unintended consequences. + Default is false. + type: boolean + required: + - package + type: object + status: + description: FunctionStatus represents the observed state of a Function. + properties: + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + currentIdentifier: + description: |- + CurrentIdentifier is the most recent package source that was used to + produce a revision. The package manager uses this field to determine + whether to check for package updates for a given source when + packagePullPolicy is set to IfNotPresent. 
Manually removing this field + will cause the package manager to check that the current revision is + correct for the given package source. + type: string + currentRevision: + description: |- + CurrentRevision is the name of the current package revision. It will + reflect the most up to date revision, whether it has been activated or + not. + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} diff --git a/cmd/crank/beta/render/load.go b/cmd/crank/beta/render/load.go index 84b13f12c..6a1d11765 100644 --- a/cmd/crank/beta/render/load.go +++ b/cmd/crank/beta/render/load.go @@ -30,6 +30,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" apiextensionsv1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) @@ -69,20 +70,21 @@ func LoadComposition(fs afero.Fs, file string) (*apiextensionsv1.Composition, er // a directory of manifests instead of a single stream. // LoadFunctions from a stream of YAML manifests. -func LoadFunctions(filesys afero.Fs, file string) ([]pkgv1beta1.Function, error) { +func LoadFunctions(filesys afero.Fs, file string) ([]pkgv1.Function, error) { stream, err := LoadYAMLStream(filesys, file) if err != nil { return nil, errors.Wrap(err, "cannot load YAML stream from file") } - functions := make([]pkgv1beta1.Function, 0, len(stream)) + // TODO(negz): This needs to support v1beta1 functions, too. + functions := make([]pkgv1.Function, 0, len(stream)) for _, y := range stream { - f := &pkgv1beta1.Function{} + f := &pkgv1.Function{} if err := yaml.Unmarshal(y, f); err != nil { return nil, errors.Wrap(err, "cannot parse YAML Function manifest") } switch gvk := f.GroupVersionKind(); gvk { - case pkgv1beta1.FunctionGroupVersionKind: + case pkgv1.FunctionGroupVersionKind, pkgv1beta1.FunctionGroupVersionKind: functions = append(functions, *f) default: return nil, errors.Errorf("not a function: %s/%s", gvk.Kind, f.GetName()) diff --git a/cmd/crank/beta/render/load_test.go b/cmd/crank/beta/render/load_test.go index d37e61437..e1a29738e 100644 --- a/cmd/crank/beta/render/load_test.go +++ b/cmd/crank/beta/render/load_test.go @@ -160,7 +160,7 @@ func TestLoadFunctions(t *testing.T) { fs := afero.FromIOFS{FS: testdatafs} type want struct { - fns []pkgv1beta1.Function + fns []pkgv1.Function err error } cases := map[string]struct { @@ -170,11 +170,11 @@ func TestLoadFunctions(t *testing.T) { "Success": { file: "testdata/functions.yaml", want: want{ - fns: []pkgv1beta1.Function{ + fns: []pkgv1.Function{ { TypeMeta: metav1.TypeMeta{ - Kind: pkgv1beta1.FunctionKind, - APIVersion: pkgv1beta1.SchemeGroupVersion.String(), + Kind: pkgv1.FunctionKind, + APIVersion: pkgv1.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "function-auto-ready", @@ -183,7 +183,7 @@ func TestLoadFunctions(t *testing.T) { AnnotationKeyRuntimeDockerCleanup: string(AnnotationValueRuntimeDockerCleanupOrphan), }, }, - Spec: pkgv1beta1.FunctionSpec{ + Spec: pkgv1.FunctionSpec{ PackageSpec: pkgv1.PackageSpec{ Package: "xpkg.upbound.io/crossplane-contrib/function-auto-ready:v0.1.2", }, @@ -201,7 +201,7 @@ func TestLoadFunctions(t *testing.T) { AnnotationKeyRuntimeDevelopmentTarget: "localhost:9444", }, }, - Spec: pkgv1beta1.FunctionSpec{ + Spec: pkgv1.FunctionSpec{ PackageSpec: pkgv1.PackageSpec{ Package: "xpkg.upbound.io/crossplane-contrib/function-dummy:v0.2.1", }, diff --git 
a/cmd/crank/beta/render/render.go b/cmd/crank/beta/render/render.go index 1564dfd86..9169faedc 100644 --- a/cmd/crank/beta/render/render.go +++ b/cmd/crank/beta/render/render.go @@ -39,9 +39,9 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" ucomposite "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" - fnv1beta1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" apiextensionsv1 "github.com/crossplane/crossplane/apis/apiextensions/v1" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" "github.com/crossplane/crossplane/internal/controller/apiextensions/composite" ) @@ -67,7 +67,7 @@ const ( type Inputs struct { CompositeResource *ucomposite.Unstructured Composition *apiextensionsv1.Composition - Functions []pkgv1beta1.Function + Functions []pkgv1.Function ObservedResources []composed.Unstructured ExtraResources []unstructured.Unstructured Context map[string][]byte @@ -99,7 +99,7 @@ type RuntimeFunctionRunner struct { // NewRuntimeFunctionRunner returns a FunctionRunner that runs functions // locally, using the runtime configured in their annotations (e.g. Docker). It // starts all the functions and creates gRPC connections when called. -func NewRuntimeFunctionRunner(ctx context.Context, log logging.Logger, fns []pkgv1beta1.Function) (*RuntimeFunctionRunner, error) { +func NewRuntimeFunctionRunner(ctx context.Context, log logging.Logger, fns []pkgv1.Function) (*RuntimeFunctionRunner, error) { contexts := map[string]RuntimeContext{} conns := map[string]*grpc.ClientConn{} @@ -127,7 +127,7 @@ func NewRuntimeFunctionRunner(ctx context.Context, log logging.Logger, fns []pkg } // RunFunction runs the named function. -func (r *RuntimeFunctionRunner) RunFunction(ctx context.Context, name string, req *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) { +func (r *RuntimeFunctionRunner) RunFunction(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { r.mx.Lock() defer r.mx.Unlock() @@ -136,7 +136,7 @@ func (r *RuntimeFunctionRunner) RunFunction(ctx context.Context, name string, re return nil, errors.Errorf("unknown Function %q - does it exist in your Functions file?", name) } - return fnv1beta1.NewFunctionRunnerServiceClient(conn).RunFunction(ctx, req) + return fnv1.NewFunctionRunnerServiceClient(conn).RunFunction(ctx, req) } // Stop all of the runner's runtimes, and close its gRPC connections. @@ -191,7 +191,7 @@ func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) } // The Function pipeline starts with empty desired state. - d := &fnv1beta1.State{} + d := &fnv1.State{} results := make([]unstructured.Unstructured, 0) @@ -216,7 +216,7 @@ func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) // results. for _, fn := range in.Composition.Spec.Pipeline { // The request to send to the function, will be updated at each iteration if needed. - req := &fnv1beta1.RunFunctionRequest{Observed: o, Desired: d, Context: fctx} + req := &fnv1.RunFunctionRequest{Observed: o, Desired: d, Context: fctx} if fn.Input != nil { in := &structpb.Struct{} @@ -241,7 +241,7 @@ func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) // Results of fatal severity stop the Composition process. 
for _, rs := range rsp.GetResults() { switch rs.GetSeverity() { //nolint:exhaustive // We intentionally have a broad default case. - case fnv1beta1.Severity_SEVERITY_FATAL: + case fnv1.Severity_SEVERITY_FATAL: return Outputs{}, errors.Errorf("pipeline step %q returned a fatal result: %s", fn.Step, rs.GetMessage()) default: results = append(results, unstructured.Unstructured{Object: map[string]any{ @@ -258,7 +258,7 @@ func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) desired := make([]composed.Unstructured, 0, len(d.GetResources())) var unready []string for name, dr := range d.GetResources() { - if dr.GetReady() != fnv1beta1.Ready_READY_TRUE { + if dr.GetReady() != fnv1.Ready_READY_TRUE { unready = append(unready, name) } @@ -348,11 +348,11 @@ type FilteringFetcher struct { // Fetch returns all of the underlying extra resources that match the supplied // resource selector. -func (f *FilteringFetcher) Fetch(_ context.Context, rs *fnv1beta1.ResourceSelector) (*fnv1beta1.Resources, error) { +func (f *FilteringFetcher) Fetch(_ context.Context, rs *fnv1.ResourceSelector) (*fnv1.Resources, error) { if len(f.extra) == 0 || rs == nil { return nil, nil } - out := &fnv1beta1.Resources{} + out := &fnv1.Resources{} for _, er := range f.extra { if rs.GetApiVersion() != er.GetAPIVersion() { continue @@ -365,7 +365,7 @@ func (f *FilteringFetcher) Fetch(_ context.Context, rs *fnv1beta1.ResourceSelect if err != nil { return nil, errors.Wrapf(err, "cannot marshal extra resource %q", er.GetName()) } - out.Items = []*fnv1beta1.Resource{{Resource: o}} + out.Items = []*fnv1.Resource{{Resource: o}} return out, nil } if rs.GetMatchLabels() != nil { @@ -374,7 +374,7 @@ func (f *FilteringFetcher) Fetch(_ context.Context, rs *fnv1beta1.ResourceSelect if err != nil { return nil, errors.Wrapf(err, "cannot marshal extra resource %q", er.GetName()) } - out.Items = append(out.GetItems(), &fnv1beta1.Resource{Resource: o}) + out.Items = append(out.GetItems(), &fnv1.Resource{Resource: o}) } } } diff --git a/cmd/crank/beta/render/render_test.go b/cmd/crank/beta/render/render_test.go index 1a21df22f..4f0600697 100644 --- a/cmd/crank/beta/render/render_test.go +++ b/cmd/crank/beta/render/render_test.go @@ -36,9 +36,9 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" ucomposite "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" - fnv1beta1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" apiextensionsv1 "github.com/crossplane/crossplane/apis/apiextensions/v1" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" "github.com/crossplane/crossplane/internal/controller/apiextensions/composite" ) @@ -64,7 +64,7 @@ func TestRender(t *testing.T) { cases := map[string]struct { reason string - rsp *fnv1beta1.RunFunctionResponse + rsp *fnv1.RunFunctionResponse args args want want }{ @@ -104,7 +104,7 @@ func TestRender(t *testing.T) { "UnknownRuntime": { args: args{ in: Inputs{ - Functions: []pkgv1beta1.Function{{ + Functions: []pkgv1.Function{{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ AnnotationKeyRuntime: "wat", @@ -155,18 +155,18 @@ func TestRender(t *testing.T) { }, }, }, - Functions: []pkgv1beta1.Function{ - func() pkgv1beta1.Function { - lis := NewFunction(t, &fnv1beta1.RunFunctionResponse{ - Results: []*fnv1beta1.Result{ + Functions: []pkgv1.Function{ + 
func() pkgv1.Function { + lis := NewFunction(t, &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ { - Severity: fnv1beta1.Severity_SEVERITY_FATAL, + Severity: fnv1.Severity_SEVERITY_FATAL, }, }, }) listeners = append(listeners, lis) - return pkgv1beta1.Function{ + return pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Name: "function-test", Annotations: map[string]string{ @@ -209,11 +209,11 @@ func TestRender(t *testing.T) { }, }, }, - Functions: []pkgv1beta1.Function{ - func() pkgv1beta1.Function { - lis := NewFunction(t, &fnv1beta1.RunFunctionResponse{ - Desired: &fnv1beta1.State{ - Composite: &fnv1beta1.Resource{ + Functions: []pkgv1.Function{ + func() pkgv1.Function { + lis := NewFunction(t, &fnv1.RunFunctionResponse{ + Desired: &fnv1.State{ + Composite: &fnv1.Resource{ Resource: MustStructJSON(`{ "status": { "widgets": 9001, @@ -227,7 +227,7 @@ func TestRender(t *testing.T) { } }`), }, - Resources: map[string]*fnv1beta1.Resource{ + Resources: map[string]*fnv1.Resource{ "b-cool-resource": { Resource: MustStructJSON(`{ "apiVersion": "atest.crossplane.io/v1", @@ -251,7 +251,7 @@ func TestRender(t *testing.T) { }) listeners = append(listeners, lis) - return pkgv1beta1.Function{ + return pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Name: "function-test", Annotations: map[string]string{ @@ -377,11 +377,11 @@ func TestRender(t *testing.T) { }, }, }, - Functions: []pkgv1beta1.Function{ - func() pkgv1beta1.Function { - lis := NewFunction(t, &fnv1beta1.RunFunctionResponse{ - Desired: &fnv1beta1.State{ - Composite: &fnv1beta1.Resource{ + Functions: []pkgv1.Function{ + func() pkgv1.Function { + lis := NewFunction(t, &fnv1.RunFunctionResponse{ + Desired: &fnv1.State{ + Composite: &fnv1.Resource{ Resource: MustStructJSON(`{ "status": { "widgets": 9001, @@ -395,7 +395,7 @@ func TestRender(t *testing.T) { } }`), }, - Resources: map[string]*fnv1beta1.Resource{ + Resources: map[string]*fnv1.Resource{ "b-cool-resource": { Resource: MustStructJSON(`{ "apiVersion": "atest.crossplane.io/v1", @@ -404,7 +404,7 @@ func TestRender(t *testing.T) { "widgets": 9003 } }`), - Ready: fnv1beta1.Ready_READY_TRUE, + Ready: fnv1.Ready_READY_TRUE, }, "a-cool-resource": { Resource: MustStructJSON(`{ @@ -414,14 +414,14 @@ func TestRender(t *testing.T) { "widgets": 9002 } }`), - Ready: fnv1beta1.Ready_READY_TRUE, + Ready: fnv1.Ready_READY_TRUE, }, }, }, }) listeners = append(listeners, lis) - return pkgv1beta1.Function{ + return pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Name: "function-test", Annotations: map[string]string{ @@ -546,20 +546,20 @@ func TestRender(t *testing.T) { }, }, }, - Functions: []pkgv1beta1.Function{ - func() pkgv1beta1.Function { + Functions: []pkgv1.Function{ + func() pkgv1.Function { i := 0 - lis := NewFunctionWithRunFunc(t, func(_ context.Context, request *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) { + lis := NewFunctionWithRunFunc(t, func(_ context.Context, request *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { defer func() { i++ }() switch i { case 0: - return &fnv1beta1.RunFunctionResponse{ - Requirements: &fnv1beta1.Requirements{ - ExtraResources: map[string]*fnv1beta1.ResourceSelector{ + return &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ "extra-resource-by-name": { ApiVersion: "test.crossplane.io/v1", Kind: "Foo", - Match: &fnv1beta1.ResourceSelector_MatchName{ + Match: &fnv1.ResourceSelector_MatchName{ MatchName: "extra-resource", }, }, @@ -575,27 +575,27 @@ func 
TestRender(t *testing.T) { t.Fatalf("expected extra resource to be passed to function on second call") } foo := (res.GetItems()[0].GetResource().AsMap()["spec"].(map[string]interface{}))["foo"].(string) - return &fnv1beta1.RunFunctionResponse{ - Requirements: &fnv1beta1.Requirements{ - ExtraResources: map[string]*fnv1beta1.ResourceSelector{ + return &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ "extra-resource-by-name": { ApiVersion: "test.crossplane.io/v1", Kind: "Foo", - Match: &fnv1beta1.ResourceSelector_MatchName{ + Match: &fnv1.ResourceSelector_MatchName{ MatchName: "extra-resource", }, }, }, }, - Desired: &fnv1beta1.State{ - Composite: &fnv1beta1.Resource{ + Desired: &fnv1.State{ + Composite: &fnv1.Resource{ Resource: MustStructJSON(`{ "status": { "widgets": "` + foo + `" } }`), }, - Resources: map[string]*fnv1beta1.Resource{ + Resources: map[string]*fnv1.Resource{ "b-cool-resource": { Resource: MustStructJSON(`{ "apiVersion": "atest.crossplane.io/v1", @@ -624,7 +624,7 @@ func TestRender(t *testing.T) { }) listeners = append(listeners, lis) - return pkgv1beta1.Function{ + return pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Name: "function-test", Annotations: map[string]string{ @@ -758,7 +758,7 @@ func TestRender(t *testing.T) { } } -func NewFunction(t *testing.T, rsp *fnv1beta1.RunFunctionResponse) net.Listener { +func NewFunction(t *testing.T, rsp *fnv1.RunFunctionResponse) net.Listener { t.Helper() lis, err := net.Listen("tcp", "localhost:0") @@ -767,13 +767,13 @@ func NewFunction(t *testing.T, rsp *fnv1beta1.RunFunctionResponse) net.Listener } srv := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) - fnv1beta1.RegisterFunctionRunnerServiceServer(srv, &MockFunctionRunner{Response: rsp}) + fnv1.RegisterFunctionRunnerServiceServer(srv, &MockFunctionRunner{Response: rsp}) go srv.Serve(lis) // This will stop when lis is closed. return lis } -func NewFunctionWithRunFunc(t *testing.T, runFunc func(context.Context, *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error)) net.Listener { +func NewFunctionWithRunFunc(t *testing.T, runFunc func(context.Context, *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error)) net.Listener { t.Helper() lis, err := net.Listen("tcp", "localhost:0") @@ -782,21 +782,21 @@ func NewFunctionWithRunFunc(t *testing.T, runFunc func(context.Context, *fnv1bet } srv := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) - fnv1beta1.RegisterFunctionRunnerServiceServer(srv, &MockFunctionRunner{RunFunc: runFunc}) + fnv1.RegisterFunctionRunnerServiceServer(srv, &MockFunctionRunner{RunFunc: runFunc}) go srv.Serve(lis) // This will stop when lis is closed. 
return lis } type MockFunctionRunner struct { - fnv1beta1.UnimplementedFunctionRunnerServiceServer + fnv1.UnimplementedFunctionRunnerServiceServer - Response *fnv1beta1.RunFunctionResponse - RunFunc func(context.Context, *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) + Response *fnv1.RunFunctionResponse + RunFunc func(context.Context, *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) Error error } -func (r *MockFunctionRunner) RunFunction(ctx context.Context, req *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) { +func (r *MockFunctionRunner) RunFunction(ctx context.Context, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { if r.Response != nil { return r.Response, r.Error } @@ -809,10 +809,10 @@ func TestFilterExtraResources(t *testing.T) { } type args struct { ctx context.Context - selector *fnv1beta1.ResourceSelector + selector *fnv1.ResourceSelector } type want struct { - out *fnv1beta1.Resources + out *fnv1.Resources err error } @@ -828,10 +828,10 @@ func TestFilterExtraResources(t *testing.T) { ers: []unstructured.Unstructured{}, }, args: args{ - selector: &fnv1beta1.ResourceSelector{ + selector: &fnv1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Foo", - Match: &fnv1beta1.ResourceSelector_MatchName{ + Match: &fnv1.ResourceSelector_MatchName{ MatchName: "extra-resource", }, }, @@ -898,17 +898,17 @@ func TestFilterExtraResources(t *testing.T) { }, }, args: args{ - selector: &fnv1beta1.ResourceSelector{ + selector: &fnv1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Bar", - Match: &fnv1beta1.ResourceSelector_MatchName{ + Match: &fnv1.ResourceSelector_MatchName{ MatchName: "extra-resource-right", }, }, }, want: want{ - out: &fnv1beta1.Resources{ - Items: []*fnv1beta1.Resource{ + out: &fnv1.Resources{ + Items: []*fnv1.Resource{ { Resource: MustStructJSON(`{ "apiVersion": "test.crossplane.io/v1", @@ -987,11 +987,11 @@ func TestFilterExtraResources(t *testing.T) { }, }, args: args{ - selector: &fnv1beta1.ResourceSelector{ + selector: &fnv1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Bar", - Match: &fnv1beta1.ResourceSelector_MatchLabels{ - MatchLabels: &fnv1beta1.MatchLabels{ + Match: &fnv1.ResourceSelector_MatchLabels{ + MatchLabels: &fnv1.MatchLabels{ Labels: map[string]string{ "right": "true", }, @@ -1000,8 +1000,8 @@ func TestFilterExtraResources(t *testing.T) { }, }, want: want{ - out: &fnv1beta1.Resources{ - Items: []*fnv1beta1.Resource{ + out: &fnv1.Resources{ + Items: []*fnv1.Resource{ { Resource: MustStructJSON(`{ "apiVersion": "test.crossplane.io/v1", @@ -1036,7 +1036,7 @@ func TestFilterExtraResources(t *testing.T) { t.Run(name, func(t *testing.T) { f := &FilteringFetcher{extra: tc.params.ers} out, err := f.Fetch(tc.args.ctx, tc.args.selector) - if diff := cmp.Diff(tc.want.out, out, cmpopts.EquateEmpty(), cmpopts.IgnoreUnexported(fnv1beta1.Resources{}, fnv1beta1.Resource{}, structpb.Struct{}, structpb.Value{})); diff != "" { + if diff := cmp.Diff(tc.want.out, out, cmpopts.EquateEmpty(), cmpopts.IgnoreUnexported(fnv1.Resources{}, fnv1.Resource{}, structpb.Struct{}, structpb.Value{})); diff != "" { t.Errorf("%s\nfilterExtraResources(...): -want, +got:\n%s", tc.reason, diff) } if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { diff --git a/cmd/crank/beta/render/runtime.go b/cmd/crank/beta/render/runtime.go index f83f23d0a..609745631 100644 --- a/cmd/crank/beta/render/runtime.go +++ b/cmd/crank/beta/render/runtime.go @@ -22,7 +22,7 @@ 
import ( "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" ) // AnnotationKeyRuntime can be added to a Function to control what runtime is @@ -63,7 +63,7 @@ type RuntimeContext struct { } // GetRuntime for the supplied Function, per its annotations. -func GetRuntime(fn pkgv1beta1.Function, log logging.Logger) (Runtime, error) { +func GetRuntime(fn pkgv1.Function, log logging.Logger) (Runtime, error) { switch r := RuntimeType(fn.GetAnnotations()[AnnotationKeyRuntime]); r { case AnnotationValueRuntimeDocker, "": return GetRuntimeDocker(fn, log) diff --git a/cmd/crank/beta/render/runtime_development.go b/cmd/crank/beta/render/runtime_development.go index 8df9eb59f..e58f2ef67 100644 --- a/cmd/crank/beta/render/runtime_development.go +++ b/cmd/crank/beta/render/runtime_development.go @@ -21,7 +21,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/logging" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" ) // Annotations that can be used to configure the Development runtime. @@ -47,7 +47,7 @@ type RuntimeDevelopment struct { // GetRuntimeDevelopment extracts RuntimeDevelopment configuration from the // supplied Function. -func GetRuntimeDevelopment(fn pkgv1beta1.Function, log logging.Logger) *RuntimeDevelopment { +func GetRuntimeDevelopment(fn pkgv1.Function, log logging.Logger) *RuntimeDevelopment { r := &RuntimeDevelopment{Target: "localhost:9443", Function: fn.GetName(), log: log} if t := fn.GetAnnotations()[AnnotationKeyRuntimeDevelopmentTarget]; t != "" { r.Target = t diff --git a/cmd/crank/beta/render/runtime_docker.go b/cmd/crank/beta/render/runtime_docker.go index 9a3aea25f..92cfee7f8 100644 --- a/cmd/crank/beta/render/runtime_docker.go +++ b/cmd/crank/beta/render/runtime_docker.go @@ -31,7 +31,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" ) // Annotations that can be used to configure the Docker runtime. @@ -106,7 +106,7 @@ type RuntimeDocker struct { // GetDockerPullPolicy extracts PullPolicy configuration from the supplied // Function. -func GetDockerPullPolicy(fn pkgv1beta1.Function) (DockerPullPolicy, error) { +func GetDockerPullPolicy(fn pkgv1.Function) (DockerPullPolicy, error) { switch p := DockerPullPolicy(fn.GetAnnotations()[AnnotationKeyRuntimeDockerPullPolicy]); p { case AnnotationValueRuntimeDockerPullPolicyAlways, AnnotationValueRuntimeDockerPullPolicyNever, AnnotationValueRuntimeDockerPullPolicyIfNotPresent: return p, nil @@ -118,7 +118,7 @@ func GetDockerPullPolicy(fn pkgv1beta1.Function) (DockerPullPolicy, error) { } // GetDockerCleanup extracts Cleanup configuration from the supplied Function. 
-func GetDockerCleanup(fn pkgv1beta1.Function) (DockerCleanup, error) { +func GetDockerCleanup(fn pkgv1.Function) (DockerCleanup, error) { switch c := DockerCleanup(fn.GetAnnotations()[AnnotationKeyRuntimeDockerCleanup]); c { case AnnotationValueRuntimeDockerCleanupStop, AnnotationValueRuntimeDockerCleanupOrphan, AnnotationValueRuntimeDockerCleanupRemove: return c, nil @@ -131,7 +131,7 @@ func GetDockerCleanup(fn pkgv1beta1.Function) (DockerCleanup, error) { // GetRuntimeDocker extracts RuntimeDocker configuration from the supplied // Function. -func GetRuntimeDocker(fn pkgv1beta1.Function, log logging.Logger) (*RuntimeDocker, error) { +func GetRuntimeDocker(fn pkgv1.Function, log logging.Logger) (*RuntimeDocker, error) { cleanup, err := GetDockerCleanup(fn) if err != nil { return nil, errors.Wrapf(err, "cannot get cleanup policy for Function %q", fn.GetName()) diff --git a/cmd/crank/beta/render/runtime_docker_test.go b/cmd/crank/beta/render/runtime_docker_test.go index 5852d4d29..913e5f0d0 100644 --- a/cmd/crank/beta/render/runtime_docker_test.go +++ b/cmd/crank/beta/render/runtime_docker_test.go @@ -28,8 +28,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/logging" - v1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" ) type mockPullClient struct { @@ -44,7 +43,7 @@ var _ pullClient = &mockPullClient{} func TestGetRuntimeDocker(t *testing.T) { type args struct { - fn v1beta1.Function + fn pkgv1.Function } type want struct { rd *RuntimeDocker @@ -59,7 +58,7 @@ func TestGetRuntimeDocker(t *testing.T) { "SuccessAllSet": { reason: "should return a RuntimeDocker with all fields set according to the supplied Function's annotations", args: args{ - fn: v1beta1.Function{ + fn: pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ AnnotationKeyRuntimeDockerCleanup: string(AnnotationValueRuntimeDockerCleanupOrphan), @@ -67,8 +66,8 @@ func TestGetRuntimeDocker(t *testing.T) { AnnotationKeyRuntimeDockerImage: "test-image-from-annotation", }, }, - Spec: v1beta1.FunctionSpec{ - PackageSpec: v1.PackageSpec{ + Spec: pkgv1.FunctionSpec{ + PackageSpec: pkgv1.PackageSpec{ Package: "test-package", }, }, @@ -85,12 +84,12 @@ func TestGetRuntimeDocker(t *testing.T) { "SuccessDefaults": { reason: "should return a RuntimeDocker with default fields set if no annotation are set", args: args{ - fn: v1beta1.Function{ + fn: pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{}, }, - Spec: v1beta1.FunctionSpec{ - PackageSpec: v1.PackageSpec{ + Spec: pkgv1.FunctionSpec{ + PackageSpec: pkgv1.PackageSpec{ Package: "test-package", }, }, @@ -107,14 +106,14 @@ func TestGetRuntimeDocker(t *testing.T) { "ErrorUnknownAnnotationValueCleanup": { reason: "should return an error if the supplied Function has an unknown cleanup annotation value", args: args{ - fn: v1beta1.Function{ + fn: pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ AnnotationKeyRuntimeDockerCleanup: "wrong", }, }, - Spec: v1beta1.FunctionSpec{ - PackageSpec: v1.PackageSpec{ + Spec: pkgv1.FunctionSpec{ + PackageSpec: pkgv1.PackageSpec{ Package: "test-package", }, }, @@ -127,14 +126,14 @@ func TestGetRuntimeDocker(t *testing.T) { "ErrorUnknownAnnotationPullPolicy": { reason: "should return an error if the supplied Function has an unknown pull policy annotation value", args: args{ - fn: v1beta1.Function{ + fn: pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Annotations: 
map[string]string{ AnnotationKeyRuntimeDockerPullPolicy: "wrong", }, }, - Spec: v1beta1.FunctionSpec{ - PackageSpec: v1.PackageSpec{ + Spec: pkgv1.FunctionSpec{ + PackageSpec: pkgv1.PackageSpec{ Package: "test-package", }, }, @@ -147,14 +146,14 @@ func TestGetRuntimeDocker(t *testing.T) { "AnnotationsCleanupSetToStop": { reason: "should return a RuntimeDocker with all fields set according to the supplied Function's annotations", args: args{ - fn: v1beta1.Function{ + fn: pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ AnnotationKeyRuntimeDockerCleanup: string(AnnotationValueRuntimeDockerCleanupStop), }, }, - Spec: v1beta1.FunctionSpec{ - PackageSpec: v1.PackageSpec{ + Spec: pkgv1.FunctionSpec{ + PackageSpec: pkgv1.PackageSpec{ Package: "test-package", }, }, diff --git a/cmd/crank/beta/render/testdata/functions.yaml b/cmd/crank/beta/render/testdata/functions.yaml index 9e226a407..8af674fb1 100644 --- a/cmd/crank/beta/render/testdata/functions.yaml +++ b/cmd/crank/beta/render/testdata/functions.yaml @@ -1,5 +1,5 @@ --- -apiVersion: pkg.crossplane.io/v1beta1 +apiVersion: pkg.crossplane.io/v1 kind: Function metadata: name: function-auto-ready diff --git a/cmd/crank/beta/trace/internal/resource/xpkg/client.go b/cmd/crank/beta/trace/internal/resource/xpkg/client.go index 05df3968c..2ef6d1984 100644 --- a/cmd/crank/beta/trace/internal/resource/xpkg/client.go +++ b/cmd/crank/beta/trace/internal/resource/xpkg/client.go @@ -32,8 +32,8 @@ import ( xpunstructured "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1alpha1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1alpha1 "github.com/crossplane/crossplane/apis/pkg/v1alpha1" + pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" "github.com/crossplane/crossplane/internal/xpkg" ) @@ -95,7 +95,7 @@ func (kc *Client) GetResourceTree(ctx context.Context, root *resource.Resource) } // the root is a package type, get the lock file now - lock := &v1beta1.Lock{} + lock := &pkgv1beta1.Lock{} if err := kc.client.Get(ctx, types.NamespacedName{Name: "lock"}, lock); err != nil { return nil, err } @@ -156,7 +156,7 @@ func (kc *Client) setPackageRuntimeConfigChild(ctx context.Context, res *resourc } // We try loading both as currently both are supported and if both are present they are merged. 
controllerConfigRef := pkgv1.ControllerConfigReference{} - apiVersion, kind := v1alpha1.ControllerConfigGroupVersionKind.ToAPIVersionAndKind() + apiVersion, kind := pkgv1alpha1.ControllerConfigGroupVersionKind.ToAPIVersionAndKind() if err := fieldpath.Pave(res.Unstructured.Object).GetValueInto("spec.controllerConfigRef", &runtimeConfigRef); err == nil { res.Children = append(res.Children, resource.GetResource(ctx, kc.client, &v1.ObjectReference{ APIVersion: apiVersion, @@ -199,8 +199,8 @@ func (kc *Client) getRevisions(ctx context.Context, xpkg *resource.Resource) ([] revisions.SetGroupVersionKind(pkgv1.ProviderRevisionGroupVersionKind) case pkgv1.ConfigurationGroupVersionKind.GroupKind(): revisions.SetGroupVersionKind(pkgv1.ConfigurationRevisionGroupVersionKind) - case v1beta1.FunctionGroupVersionKind.GroupKind(): - revisions.SetGroupVersionKind(v1beta1.FunctionRevisionGroupVersionKind) + case pkgv1.FunctionGroupVersionKind.GroupKind(): + revisions.SetGroupVersionKind(pkgv1.FunctionRevisionGroupVersionKind) default: // If we didn't match any of the know types, we try to guess revisions.SetGroupVersionKind(gvk.GroupVersion().WithKind(gvk.Kind + "RevisionList")) @@ -221,14 +221,14 @@ func (kc *Client) getRevisions(ctx context.Context, xpkg *resource.Resource) ([] } // getPackageDetails returns the package details for the given package type. -func getPackageDetails(t v1beta1.PackageType) (string, string, pkgv1.PackageRevision, error) { +func getPackageDetails(t pkgv1beta1.PackageType) (string, string, pkgv1.PackageRevision, error) { switch t { - case v1beta1.ProviderPackageType: + case pkgv1beta1.ProviderPackageType: return pkgv1.ProviderKind, pkgv1.ProviderGroupVersionKind.GroupVersion().String(), &pkgv1.ProviderRevision{}, nil - case v1beta1.ConfigurationPackageType: + case pkgv1beta1.ConfigurationPackageType: return pkgv1.ConfigurationKind, pkgv1.ConfigurationGroupVersionKind.GroupVersion().String(), &pkgv1.ConfigurationRevision{}, nil - case v1beta1.FunctionPackageType: - return v1beta1.FunctionKind, v1beta1.FunctionGroupVersionKind.GroupVersion().String(), &v1beta1.FunctionRevision{}, nil + case pkgv1beta1.FunctionPackageType: + return pkgv1.FunctionKind, pkgv1.FunctionGroupVersionKind.GroupVersion().String(), &pkgv1.FunctionRevision{}, nil default: return "", "", nil, errors.Errorf("unknown package dependency type %s", t) } @@ -236,7 +236,7 @@ func getPackageDetails(t v1beta1.PackageType) (string, string, pkgv1.PackageRevi // getDependencyRef returns the dependency reference for the given package, // based on the lock file. -func (kc *Client) getDependencyRef(ctx context.Context, lock *v1beta1.Lock, pkgType v1beta1.PackageType, pkg string) (*v1.ObjectReference, error) { +func (kc *Client) getDependencyRef(ctx context.Context, lock *pkgv1beta1.Lock, pkgType pkgv1beta1.PackageType, pkg string) (*v1.ObjectReference, error) { // if we don't find a package to match the current dependency, which // can happen during initial installation when dependencies are // being discovered and fetched. We'd still like to show something @@ -285,7 +285,7 @@ func (kc *Client) getDependencyRef(ctx context.Context, lock *v1beta1.Lock, pkgT } // getPackageDeps returns the dependencies for the given package resource. 
-func (kc *Client) getPackageDeps(ctx context.Context, node *resource.Resource, lock *v1beta1.Lock, uniqueDeps map[string]struct{}) ([]v1.ObjectReference, error) { +func (kc *Client) getPackageDeps(ctx context.Context, node *resource.Resource, lock *pkgv1beta1.Lock, uniqueDeps map[string]struct{}) ([]v1.ObjectReference, error) { cr, _ := fieldpath.Pave(node.Unstructured.Object).GetString("status.currentRevision") if cr == "" { // we don't have a current package revision, so just return empty deps @@ -293,7 +293,7 @@ func (kc *Client) getPackageDeps(ctx context.Context, node *resource.Resource, l } // find the lock file entry for the current revision - var lp *v1beta1.LockPackage + var lp *pkgv1beta1.LockPackage for i := range lock.Packages { if lock.Packages[i].Name == cr { lp = &lock.Packages[i] diff --git a/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go b/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go index f710e276a..689aae1b0 100644 --- a/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go +++ b/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go @@ -161,7 +161,7 @@ func TestGetDependencyRef(t *testing.T) { want: want{ err: nil, ref: &v1.ObjectReference{ - APIVersion: "pkg.crossplane.io/v1beta1", + APIVersion: "pkg.crossplane.io/v1", Kind: "Function", Name: "function-1", }, diff --git a/cmd/crank/beta/trace/internal/resource/xpkg/xpkg.go b/cmd/crank/beta/trace/internal/resource/xpkg/xpkg.go index 5ab1e7ccb..f589ba707 100644 --- a/cmd/crank/beta/trace/internal/resource/xpkg/xpkg.go +++ b/cmd/crank/beta/trace/internal/resource/xpkg/xpkg.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1alpha1" + pkgv1alpha1 "github.com/crossplane/crossplane/apis/pkg/v1alpha1" pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) @@ -54,7 +54,7 @@ const ( func IsPackageType(gk schema.GroupKind) bool { return gk == pkgv1.ProviderGroupVersionKind.GroupKind() || gk == pkgv1.ConfigurationGroupVersionKind.GroupKind() || - gk == pkgv1beta1.FunctionGroupVersionKind.GroupKind() + gk == pkgv1.FunctionGroupVersionKind.GroupKind() } // IsPackageRevisionType returns true if the GroupKind is a Crossplane package @@ -62,12 +62,12 @@ func IsPackageType(gk schema.GroupKind) bool { func IsPackageRevisionType(gk schema.GroupKind) bool { return gk == pkgv1.ConfigurationRevisionGroupVersionKind.GroupKind() || gk == pkgv1.ProviderRevisionGroupVersionKind.GroupKind() || - gk == pkgv1beta1.FunctionRevisionGroupVersionKind.GroupKind() + gk == pkgv1.FunctionRevisionGroupVersionKind.GroupKind() } // IsPackageRuntimeConfigType returns true if the GroupKind is a Crossplane runtime // config type. 
func IsPackageRuntimeConfigType(gk schema.GroupKind) bool { return gk == pkgv1beta1.DeploymentRuntimeConfigGroupVersionKind.GroupKind() || - gk == v1alpha1.ControllerConfigGroupVersionKind.GroupKind() + gk == pkgv1alpha1.ControllerConfigGroupVersionKind.GroupKind() } diff --git a/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go b/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go index 6a9e3609a..460bb201c 100644 --- a/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go +++ b/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go @@ -21,9 +21,9 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" - v1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1alpha1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" + pkgv1alpha1 "github.com/crossplane/crossplane/apis/pkg/v1alpha1" + pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) func TestIsPackageType(t *testing.T) { @@ -42,7 +42,7 @@ func TestIsPackageType(t *testing.T) { "V1ProviderOK": { reason: "Should return true for a v1 Provider", args: args{ - gk: v1.ProviderGroupVersionKind.GroupKind(), + gk: pkgv1.ProviderGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -51,7 +51,7 @@ func TestIsPackageType(t *testing.T) { "V1ConfigurationOK": { reason: "Should return true for a v1 Configuration", args: args{ - gk: v1.ConfigurationGroupVersionKind.GroupKind(), + gk: pkgv1.ConfigurationGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -60,7 +60,7 @@ func TestIsPackageType(t *testing.T) { "V1beta1FunctionOK": { reason: "Should return true for a v1beta1 Function", args: args{ - gk: v1beta1.FunctionGroupVersionKind.GroupKind(), + gk: pkgv1.FunctionGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -69,7 +69,7 @@ func TestIsPackageType(t *testing.T) { "V1ProviderRevisionKO": { reason: "Should return false for a v1 ProviderRevision", args: args{ - gk: v1.ProviderRevisionGroupVersionKind.GroupKind(), + gk: pkgv1.ProviderRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: false, @@ -78,7 +78,7 @@ func TestIsPackageType(t *testing.T) { "V1ConfigurationRevisionKO": { reason: "Should return false for a v1 ConfigurationRevision", args: args{ - gk: v1.ConfigurationRevisionGroupVersionKind.GroupKind(), + gk: pkgv1.ConfigurationRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: false, @@ -87,7 +87,7 @@ func TestIsPackageType(t *testing.T) { "V1beta1FunctionRevisionKO": { reason: "Should return false for a v1beta1 FunctionRevision", args: args{ - gk: v1beta1.FunctionRevisionGroupVersionKind.GroupKind(), + gk: pkgv1.FunctionRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: false, @@ -132,7 +132,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1ProviderKO": { reason: "Should return false for a v1 Provider", args: args{ - gk: v1.ProviderGroupVersionKind.GroupKind(), + gk: pkgv1.ProviderGroupVersionKind.GroupKind(), }, want: want{ ok: false, @@ -141,7 +141,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1ConfigurationKO": { reason: "Should return false for a v1 Configuration", args: args{ - gk: v1.ConfigurationGroupVersionKind.GroupKind(), + gk: pkgv1.ConfigurationGroupVersionKind.GroupKind(), }, want: want{ ok: false, @@ -150,7 +150,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1beta1FunctionKO": { reason: "Should return false for a v1beta1 Function", args: args{ - gk: v1beta1.FunctionGroupVersionKind.GroupKind(), + gk: pkgv1.FunctionGroupVersionKind.GroupKind(), }, want: 
want{ ok: false, @@ -159,7 +159,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1ProviderRevisionOK": { reason: "Should return true for a v1 ProviderRevision", args: args{ - gk: v1.ProviderRevisionGroupVersionKind.GroupKind(), + gk: pkgv1.ProviderRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -168,7 +168,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1ConfigurationRevisionOK": { reason: "Should return true for a v1 ConfigurationRevision", args: args{ - gk: v1.ConfigurationRevisionGroupVersionKind.GroupKind(), + gk: pkgv1.ConfigurationRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -177,7 +177,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1beta1FunctionRevisionOK": { reason: "Should return true for a v1beta1 FunctionRevision", args: args{ - gk: v1beta1.FunctionRevisionGroupVersionKind.GroupKind(), + gk: pkgv1.FunctionRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -221,7 +221,7 @@ func TestIsPackageRuntimeConfigType(t *testing.T) { "V1Alpha1ControllerConfigOK": { reason: "Should return true for a v1alpha1 ControllerConfig", args: args{ - gk: v1alpha1.ControllerConfigGroupVersionKind.GroupKind(), + gk: pkgv1alpha1.ControllerConfigGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -230,7 +230,7 @@ func TestIsPackageRuntimeConfigType(t *testing.T) { "V1Beta1DeploymentRuntimeConfigOK": { reason: "Should return true for a v1beta1 DeploymentRuntimeConfig", args: args{ - gk: v1beta1.DeploymentRuntimeConfigGroupVersionKind.GroupKind(), + gk: pkgv1beta1.DeploymentRuntimeConfigGroupVersionKind.GroupKind(), }, want: want{ ok: true, diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go index 80461809c..5b8ac70d1 100644 --- a/cmd/crank/beta/validate/manager_test.go +++ b/cmd/crank/beta/validate/manager_test.go @@ -187,7 +187,7 @@ func TestConfigurationTypeSupport(t *testing.T) { // config-meta // └─►function-dep-1 // config-pkg - //└─►provider-dep-1 + // └─►provider-dep-1 reason: "All dependencies should be successfully added from both Configuration.meta and Configuration.pkg", args: args{ extensions: []*unstructured.Unstructured{ diff --git a/cmd/crank/xpkg/install.go b/cmd/crank/xpkg/install.go index 1839cb7fb..ce69b9b38 100644 --- a/cmd/crank/xpkg/install.go +++ b/cmd/crank/xpkg/install.go @@ -132,9 +132,9 @@ func (c *installCmd) Run(k *kong.Context, logger logging.Logger) error { Spec: v1.ConfigurationSpec{PackageSpec: spec}, } case "function": - pkg = &v1beta1.Function{ + pkg = &v1.Function{ ObjectMeta: metav1.ObjectMeta{Name: pkgName}, - Spec: v1beta1.FunctionSpec{PackageSpec: spec}, + Spec: v1.FunctionSpec{PackageSpec: spec}, } default: // The enum struct tag on the Kind field should make this impossible. diff --git a/cmd/crank/xpkg/update.go b/cmd/crank/xpkg/update.go index 2c3e0fb8f..70c7c57b4 100644 --- a/cmd/crank/xpkg/update.go +++ b/cmd/crank/xpkg/update.go @@ -85,7 +85,7 @@ func (c *updateCmd) Run(k *kong.Context, logger logging.Logger) error { case "configuration": pkg = &v1.Configuration{} case "function": - pkg = &v1beta1.Function{} + pkg = &v1.Function{} default: // The enum struct tag on the Kind field should make this impossible. 
return errors.Errorf("unsupported package kind %q", c.Kind) diff --git a/cmd/crossplane/core/core.go b/cmd/crossplane/core/core.go index 7ad9b444d..af285bbea 100644 --- a/cmd/crossplane/core/core.go +++ b/cmd/crossplane/core/core.go @@ -114,8 +114,6 @@ type startCommand struct { EnableRealtimeCompositions bool `group:"Alpha Features:" help:"Enable support for realtime compositions, i.e. watching composed resources and reconciling compositions immediately when any of the composed resources is updated."` EnableSSAClaims bool `group:"Alpha Features:" help:"Enable support for using Kubernetes server-side apply to sync claims with composite resources (XRs)."` - EnableCompositionFunctions bool `default:"true" group:"Beta Features:" help:"Enable support for Composition Functions."` - EnableCompositionFunctionsExtraResources bool `default:"true" group:"Beta Features:" help:"Enable support for Composition Functions Extra Resources. Only respected if --enable-composition-functions is set to true."` EnableCompositionWebhookSchemaValidation bool `default:"true" group:"Beta Features:" help:"Enable support for Composition validation using schemas."` EnableDeploymentRuntimeConfigs bool `default:"true" group:"Beta Features:" help:"Enable support for Deployment Runtime Configs."` @@ -123,11 +121,16 @@ type startCommand struct { // You can't turn off a GA feature. We maintain the flags to avoid breaking // folks who are passing them, but they do nothing. The flags are hidden so // they don't show up in the help output. - EnableCompositionRevisions bool `default:"true" hidden:""` + EnableCompositionRevisions bool `default:"true" hidden:""` + EnableCompositionFunctions bool `default:"true" hidden:""` + EnableCompositionFunctionsExtraResources bool `default:"true" hidden:""` } // Run core Crossplane controllers. func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //nolint:gocognit // Only slightly over. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := ctrl.GetConfig() if err != nil { return errors.Wrap(err, "cannot get config") @@ -197,43 +200,37 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli } if !c.EnableCompositionRevisions { - log.Info("CompositionRevisions feature is GA and cannot be disabled. The --enable-composition-revisions flag will be removed in a future release.") + log.Info("Composition Revisions are GA and cannot be disabled. The --enable-composition-revisions flag will be removed in a future release.") + } + if !c.EnableCompositionFunctions { + log.Info("Composition Functions are GA and cannot be disabled. The --enable-composition-functions flag will be removed in a future release.") + } + if !c.EnableCompositionFunctionsExtraResources { + log.Info("Extra Resources are GA and cannot be disabled. 
The --enable-composition-functions-extra-resources flag will be removed in a future release.") } - var functionRunner *xfn.PackagedFunctionRunner - if c.EnableCompositionFunctions { - o.Features.Enable(features.EnableBetaCompositionFunctions) - log.Info("Beta feature enabled", "flag", features.EnableBetaCompositionFunctions) + clienttls, err := certificates.LoadMTLSConfig( + filepath.Join(c.TLSClientCertsDir, initializer.SecretKeyCACert), + filepath.Join(c.TLSClientCertsDir, corev1.TLSCertKey), + filepath.Join(c.TLSClientCertsDir, corev1.TLSPrivateKeyKey), + false) + if err != nil { + return errors.Wrap(err, "cannot load client TLS certificates") + } - if c.EnableCompositionFunctionsExtraResources { - o.Features.Enable(features.EnableBetaCompositionFunctionsExtraResources) - log.Info("Beta feature enabled", "flag", features.EnableBetaCompositionFunctionsExtraResources) - } + m := xfn.NewMetrics() + metrics.Registry.MustRegister(m) - clienttls, err := certificates.LoadMTLSConfig( - filepath.Join(c.TLSClientCertsDir, initializer.SecretKeyCACert), - filepath.Join(c.TLSClientCertsDir, corev1.TLSCertKey), - filepath.Join(c.TLSClientCertsDir, corev1.TLSPrivateKeyKey), - false) - if err != nil { - return errors.Wrap(err, "cannot load client TLS certificates") - } - - m := xfn.NewMetrics() - metrics.Registry.MustRegister(m) + // We want all XR controllers to share the same gRPC clients. + functionRunner := xfn.NewPackagedFunctionRunner(mgr.GetClient(), + xfn.WithLogger(log), + xfn.WithTLSConfig(clienttls), + xfn.WithInterceptorCreators(m), + ) - // We want all XR controllers to share the same gRPC clients. - functionRunner = xfn.NewPackagedFunctionRunner(mgr.GetClient(), - xfn.WithLogger(log), - xfn.WithTLSConfig(clienttls), - xfn.WithInterceptorCreators(m), - ) + // Periodically remove clients for Functions that no longer exist. + go functionRunner.GarbageCollectConnections(ctx, 10*time.Minute) - // Periodically remove clients for Functions that no longer exist. - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go functionRunner.GarbageCollectConnections(ctx, 10*time.Minute) - } if c.EnableEnvironmentConfigs { o.Features.Enable(features.EnableAlphaEnvironmentConfigs) log.Info("Alpha feature enabled", "flag", features.EnableAlphaEnvironmentConfigs) @@ -304,8 +301,6 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli return errors.Wrap(err, "cannot create cache for API extension controllers") } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() go func() { // Don't start the cache until the manager is elected. <-mgr.Elected() diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index be16c3acd..c8f2880f5 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -43,7 +43,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/internal/names" ) @@ -122,14 +122,14 @@ type xr struct { // A FunctionRunner runs a single Composition Function. 
type FunctionRunner interface { // RunFunction runs the named Composition Function. - RunFunction(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) + RunFunction(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) } // A FunctionRunnerFn is a function that can run a Composition Function. -type FunctionRunnerFn func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) +type FunctionRunnerFn func(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) // RunFunction runs the named Composition Function with the supplied request. -func (fn FunctionRunnerFn) RunFunction(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { +func (fn FunctionRunnerFn) RunFunction(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { return fn(ctx, name, req) } @@ -148,14 +148,14 @@ func (fn ComposedResourceObserverFn) ObserveComposedResources(ctx context.Contex // A ExtraResourcesFetcher gets extra resources matching a selector. type ExtraResourcesFetcher interface { - Fetch(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) + Fetch(ctx context.Context, rs *fnv1.ResourceSelector) (*fnv1.Resources, error) } // An ExtraResourcesFetcherFn gets extra resources matching the selector. -type ExtraResourcesFetcherFn func(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) +type ExtraResourcesFetcherFn func(ctx context.Context, rs *fnv1.ResourceSelector) (*fnv1.Resources, error) // Fetch gets extra resources matching the selector. -func (fn ExtraResourcesFetcherFn) Fetch(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { +func (fn ExtraResourcesFetcherFn) Fetch(ctx context.Context, rs *fnv1.ResourceSelector) (*fnv1.Resources, error) { return fn(ctx, rs) } @@ -274,7 +274,7 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur } // The Function pipeline starts with empty desired state. - d := &v1beta1.State{} + d := &fnv1.State{} events := []TargetedEvent{} conditions := []TargetedCondition{} @@ -295,7 +295,7 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur // the desired state returned by the last, and each Function may produce // results that will be emitted as events. for _, fn := range req.Revision.Spec.Pipeline { - req := &v1beta1.RunFunctionRequest{Observed: o, Desired: d, Context: fctx} + req := &fnv1.RunFunctionRequest{Observed: o, Desired: d, Context: fctx} if fn.Input != nil { in := &structpb.Struct{} @@ -305,7 +305,7 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur req.Input = in } - req.Credentials = map[string]*v1beta1.Credentials{} + req.Credentials = map[string]*fnv1.Credentials{} for _, cs := range fn.Credentials { // For now we only support loading credentials from secrets. 
if cs.Source != v1.FunctionCredentialsSourceSecret || cs.SecretRef == nil { @@ -316,9 +316,9 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur if err := c.client.Get(ctx, client.ObjectKey{Namespace: cs.SecretRef.Namespace, Name: cs.SecretRef.Name}, s); err != nil { return CompositionResult{}, errors.Wrapf(err, errFmtGetCredentialsFromSecret, fn.Step, cs.Name) } - req.Credentials[cs.Name] = &v1beta1.Credentials{ - Source: &v1beta1.Credentials_CredentialData{ - CredentialData: &v1beta1.CredentialData{ + req.Credentials[cs.Name] = &fnv1.Credentials{ + Source: &fnv1.Credentials_CredentialData{ + CredentialData: &fnv1.CredentialData{ Data: s.Data, }, }, @@ -342,11 +342,11 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur for _, c := range rsp.GetConditions() { var status corev1.ConditionStatus switch c.GetStatus() { - case v1beta1.Status_STATUS_CONDITION_TRUE: + case fnv1.Status_STATUS_CONDITION_TRUE: status = corev1.ConditionTrue - case v1beta1.Status_STATUS_CONDITION_FALSE: + case fnv1.Status_STATUS_CONDITION_FALSE: status = corev1.ConditionFalse - case v1beta1.Status_STATUS_CONDITION_UNKNOWN, v1beta1.Status_STATUS_CONDITION_UNSPECIFIED: + case fnv1.Status_STATUS_CONDITION_UNKNOWN, fnv1.Status_STATUS_CONDITION_UNSPECIFIED: status = corev1.ConditionUnknown } @@ -373,15 +373,15 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur e := TargetedEvent{Target: convertTarget(rs.GetTarget())} switch rs.GetSeverity() { - case v1beta1.Severity_SEVERITY_FATAL: + case fnv1.Severity_SEVERITY_FATAL: return CompositionResult{Events: events, Conditions: conditions}, errors.Errorf(errFmtFatalResult, fn.Step, rs.GetMessage()) - case v1beta1.Severity_SEVERITY_WARNING: + case fnv1.Severity_SEVERITY_WARNING: e.Event = event.Warning(reason, errors.New(rs.GetMessage())) e.Detail = fmt.Sprintf("Pipeline step %q", fn.Step) - case v1beta1.Severity_SEVERITY_NORMAL: + case fnv1.Severity_SEVERITY_NORMAL: e.Event = event.Normal(reason, rs.GetMessage()) e.Detail = fmt.Sprintf("Pipeline step %q", fn.Step) - case v1beta1.Severity_SEVERITY_UNSPECIFIED: + case fnv1.Severity_SEVERITY_UNSPECIFIED: // We could hit this case if a Function was built against a newer // protobuf than this build of Crossplane, and the new protobuf // introduced a severity that we don't know about. @@ -435,7 +435,7 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur desired[ResourceName(name)] = ComposedResourceState{ Resource: cd, ConnectionDetails: dr.GetConnectionDetails(), - Ready: dr.GetReady() == v1beta1.Ready_READY_TRUE, + Ready: dr.GetReady() == fnv1.Ready_READY_TRUE, } } @@ -650,25 +650,25 @@ func (g *ExistingComposedResourceObserver) ObserveComposedResources(ctx context. // AsState builds state for a RunFunctionRequest from the XR and composed // resources. 
-func AsState(xr resource.Composite, xc managed.ConnectionDetails, rs ComposedResourceStates) (*v1beta1.State, error) { +func AsState(xr resource.Composite, xc managed.ConnectionDetails, rs ComposedResourceStates) (*fnv1.State, error) { r, err := AsStruct(xr) if err != nil { return nil, errors.Wrap(err, errXRAsStruct) } - oxr := &v1beta1.Resource{Resource: r, ConnectionDetails: xc} + oxr := &fnv1.Resource{Resource: r, ConnectionDetails: xc} - ocds := make(map[string]*v1beta1.Resource) + ocds := make(map[string]*fnv1.Resource) for name, or := range rs { r, err := AsStruct(or.Resource) if err != nil { return nil, errors.Wrapf(err, errFmtCDAsStruct, name) } - ocds[string(name)] = &v1beta1.Resource{Resource: r, ConnectionDetails: or.ConnectionDetails} + ocds[string(name)] = &fnv1.Resource{Resource: r, ConnectionDetails: or.ConnectionDetails} } - return &v1beta1.State{Composite: oxr, Resources: ocds}, nil + return &fnv1.State{Composite: oxr, Resources: ocds}, nil } // AsStruct converts the supplied object to a protocol buffer Struct well-known @@ -864,8 +864,8 @@ func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client. } } -func convertTarget(t v1beta1.Target) CompositionTarget { - if t == v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM { +func convertTarget(t fnv1.Target) CompositionTarget { + if t == fnv1.Target_TARGET_COMPOSITE_AND_CLAIM { return CompositionTargetCompositeAndClaim } return CompositionTargetComposite diff --git a/internal/controller/apiextensions/composite/composition_functions_test.go b/internal/controller/apiextensions/composite/composition_functions_test.go index e4e8bdf1b..cb026d93d 100644 --- a/internal/controller/apiextensions/composite/composition_functions_test.go +++ b/internal/controller/apiextensions/composite/composition_functions_test.go @@ -45,7 +45,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" "github.com/crossplane/crossplane-runtime/pkg/test" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/internal/xcrd" ) @@ -196,7 +196,7 @@ func TestFunctionCompose(t *testing.T) { "RunFunctionError": { reason: "We should return any error encountered while running a Composition Function", params: params{ - r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { return nil, errBoom }), o: []FunctionComposerOption{ @@ -230,52 +230,52 @@ func TestFunctionCompose(t *testing.T) { "FatalFunctionResultError": { reason: "We should return any fatal function results as an error. Any conditions returned by the function should be passed up. Any results returned by the function prior to the fatal result should be passed up.", params: params{ - r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - return &v1beta1.RunFunctionResponse{ - Results: []*v1beta1.Result{ + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + return &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ // This result should be passed up as it was sent before the fatal // result. 
The reason should be defaulted. The target should be // defaulted. { - Severity: v1beta1.Severity_SEVERITY_NORMAL, + Severity: fnv1.Severity_SEVERITY_NORMAL, Message: "A result before the fatal result with the default Reason.", }, // This result should be passed up as it was sent before the fatal // result. The reason should be kept. The target should be kept. { - Severity: v1beta1.Severity_SEVERITY_NORMAL, + Severity: fnv1.Severity_SEVERITY_NORMAL, Reason: ptr.To("SomeReason"), Message: "A result before the fatal result with a specific Reason.", - Target: v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + Target: fnv1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), }, // The fatal result { - Severity: v1beta1.Severity_SEVERITY_FATAL, + Severity: fnv1.Severity_SEVERITY_FATAL, Message: "oh no", }, // This result should not be passed up as it was sent after the // fatal result. { - Severity: v1beta1.Severity_SEVERITY_NORMAL, + Severity: fnv1.Severity_SEVERITY_NORMAL, Message: "a result after the fatal result", }, }, - Conditions: []*v1beta1.Condition{ + Conditions: []*fnv1.Condition{ // A condition returned by the function with only the minimum // necessary values. { Type: "DatabaseReady", - Status: v1beta1.Status_STATUS_CONDITION_FALSE, + Status: fnv1.Status_STATUS_CONDITION_FALSE, Reason: "Creating", }, // A condition returned by the function with all optional values // given. { Type: "DeploymentReady", - Status: v1beta1.Status_STATUS_CONDITION_TRUE, + Status: fnv1.Status_STATUS_CONDITION_TRUE, Reason: "Available", Message: ptr.To("The deployment is ready."), - Target: v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + Target: fnv1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), }, }, }, nil @@ -356,9 +356,9 @@ func TestFunctionCompose(t *testing.T) { "RenderComposedResourceMetadataError": { reason: "We should return any error we encounter when rendering composed resource metadata", params: params{ - r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - d := &v1beta1.State{ - Resources: map[string]*v1beta1.Resource{ + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + d := &fnv1.State{ + Resources: map[string]*fnv1.Resource{ "cool-resource": { Resource: MustStruct(map[string]any{ "apiVersion": "test.crossplane.io/v1", @@ -367,7 +367,7 @@ func TestFunctionCompose(t *testing.T) { }, }, } - return &v1beta1.RunFunctionResponse{Desired: d}, nil + return &fnv1.RunFunctionResponse{Desired: d}, nil }), o: []FunctionComposerOption{ WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { @@ -404,9 +404,9 @@ func TestFunctionCompose(t *testing.T) { kube: &test.MockClient{ MockGet: test.NewMockGetFn(errBoom), }, - r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - d := &v1beta1.State{ - Resources: map[string]*v1beta1.Resource{ + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + d := &fnv1.State{ + Resources: map[string]*fnv1.Resource{ "cool-resource": { Resource: MustStruct(map[string]any{ "apiVersion": "test.crossplane.io/v1", @@ -417,7 +417,7 @@ func TestFunctionCompose(t *testing.T) { }, }, } - return &v1beta1.RunFunctionResponse{Desired: d}, nil + return &fnv1.RunFunctionResponse{Desired: 
d}, nil }), o: []FunctionComposerOption{ WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { @@ -453,8 +453,8 @@ func TestFunctionCompose(t *testing.T) { kube: &test.MockClient{ MockPatch: test.NewMockPatchFn(nil), }, - r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - return &v1beta1.RunFunctionResponse{}, nil + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + return &fnv1.RunFunctionResponse{}, nil }), o: []FunctionComposerOption{ WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { @@ -501,8 +501,8 @@ func TestFunctionCompose(t *testing.T) { return nil }), }, - r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - return &v1beta1.RunFunctionResponse{}, nil + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + return &fnv1.RunFunctionResponse{}, nil }), o: []FunctionComposerOption{ WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { @@ -542,9 +542,9 @@ func TestFunctionCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil), MockStatusPatch: test.NewMockSubResourcePatchFn(errBoom), }, - r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - d := &v1beta1.State{ - Composite: &v1beta1.Resource{ + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + d := &fnv1.State{ + Composite: &fnv1.Resource{ Resource: MustStruct(map[string]any{ "status": map[string]any{ "widgets": 42, @@ -552,7 +552,7 @@ func TestFunctionCompose(t *testing.T) { }), }, } - return &v1beta1.RunFunctionResponse{Desired: d}, nil + return &fnv1.RunFunctionResponse{Desired: d}, nil }), o: []FunctionComposerOption{ WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { @@ -602,9 +602,9 @@ func TestFunctionCompose(t *testing.T) { }), MockStatusPatch: test.NewMockSubResourcePatchFn(nil), }, - r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - d := &v1beta1.State{ - Resources: map[string]*v1beta1.Resource{ + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + d := &fnv1.State{ + Resources: map[string]*fnv1.Resource{ "uncool-resource": { Resource: MustStruct(map[string]any{ "apiVersion": "test.crossplane.io/v1", @@ -613,7 +613,7 @@ func TestFunctionCompose(t *testing.T) { }, }, } - return &v1beta1.RunFunctionResponse{Desired: d}, nil + return &fnv1.RunFunctionResponse{Desired: d}, nil }), o: []FunctionComposerOption{ WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { @@ -667,10 +667,10 @@ func TestFunctionCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil), 
MockStatusPatch: test.NewMockSubResourcePatchFn(nil), }, - r: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { - rsp := &v1beta1.RunFunctionResponse{ - Desired: &v1beta1.State{ - Composite: &v1beta1.Resource{ + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + rsp := &fnv1.RunFunctionResponse{ + Desired: &fnv1.State{ + Composite: &fnv1.Resource{ Resource: MustStruct(map[string]any{ "status": map[string]any{ "widgets": 42, @@ -678,13 +678,13 @@ func TestFunctionCompose(t *testing.T) { }), ConnectionDetails: map[string][]byte{"from": []byte("function-pipeline")}, }, - Resources: map[string]*v1beta1.Resource{ + Resources: map[string]*fnv1.Resource{ "observed-resource-a": { Resource: MustStruct(map[string]any{ "apiVersion": "test.crossplane.io/v1", "kind": "CoolComposed", }), - Ready: v1beta1.Ready_READY_TRUE, + Ready: fnv1.Ready_READY_TRUE, }, "desired-resource-a": { Resource: MustStruct(map[string]any{ @@ -694,42 +694,42 @@ func TestFunctionCompose(t *testing.T) { }, }, }, - Results: []*v1beta1.Result{ + Results: []*fnv1.Result{ { - Severity: v1beta1.Severity_SEVERITY_NORMAL, + Severity: fnv1.Severity_SEVERITY_NORMAL, Message: "A normal result", }, { - Severity: v1beta1.Severity_SEVERITY_WARNING, + Severity: fnv1.Severity_SEVERITY_WARNING, Message: "A warning result", }, { - Severity: v1beta1.Severity_SEVERITY_UNSPECIFIED, + Severity: fnv1.Severity_SEVERITY_UNSPECIFIED, Message: "A result of unspecified severity", }, { - Severity: v1beta1.Severity_SEVERITY_NORMAL, + Severity: fnv1.Severity_SEVERITY_NORMAL, Reason: ptr.To("SomeReason"), Message: "A result with all values explicitly set.", - Target: v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + Target: fnv1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), }, }, - Conditions: []*v1beta1.Condition{ + Conditions: []*fnv1.Condition{ // A condition returned by the function with only the minimum // necessary values. { Type: "DatabaseReady", - Status: v1beta1.Status_STATUS_CONDITION_FALSE, + Status: fnv1.Status_STATUS_CONDITION_FALSE, Reason: "Creating", }, // A condition returned by the function with all optional values // given. 
{ Type: "DeploymentReady", - Status: v1beta1.Status_STATUS_CONDITION_TRUE, + Status: fnv1.Status_STATUS_CONDITION_TRUE, Reason: "Available", Message: ptr.To("The deployment is ready."), - Target: v1beta1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + Target: fnv1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), }, }, } @@ -1129,7 +1129,7 @@ func TestAsState(t *testing.T) { rs ComposedResourceStates } type want struct { - d *v1beta1.State + d *fnv1.State err error } @@ -1158,15 +1158,15 @@ func TestAsState(t *testing.T) { }, }, want: want{ - d: &v1beta1.State{ - Composite: &v1beta1.Resource{ + d: &fnv1.State{ + Composite: &fnv1.Resource{ Resource: &structpb.Struct{Fields: map[string]*structpb.Value{ "apiVersion": structpb.NewStringValue("example.org/v1"), "kind": structpb.NewStringValue("Composite"), }}, ConnectionDetails: map[string][]byte{"a": []byte("b")}, }, - Resources: map[string]*v1beta1.Resource{ + Resources: map[string]*fnv1.Resource{ "cool-resource": { Resource: &structpb.Struct{Fields: map[string]*structpb.Value{ "apiVersion": structpb.NewStringValue("example.org/v2"), diff --git a/internal/controller/apiextensions/composite/extra_resources.go b/internal/controller/apiextensions/composite/extra_resources.go index 358d83b80..12ed83f17 100644 --- a/internal/controller/apiextensions/composite/extra_resources.go +++ b/internal/controller/apiextensions/composite/extra_resources.go @@ -26,7 +26,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" ) // MaxRequirementsIterations is the maximum number of times a Function should be @@ -49,9 +49,9 @@ func NewFetchingFunctionRunner(r FunctionRunner, f ExtraResourcesFetcher) *Fetch // RunFunction runs a function, repeatedly fetching any extra resources it asks // for. The function may be run up to MaxRequirementsIterations times. -func (c *FetchingFunctionRunner) RunFunction(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { +func (c *FetchingFunctionRunner) RunFunction(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { // Used to store the requirements returned at the previous iteration. - var requirements *v1beta1.Requirements + var requirements *fnv1.Requirements for i := int64(0); i <= MaxRequirementsIterations; i++ { rsp, err := c.wrapped.RunFunction(ctx, name, req) @@ -61,7 +61,7 @@ func (c *FetchingFunctionRunner) RunFunction(ctx context.Context, name string, r } for _, rs := range rsp.GetResults() { - if rs.GetSeverity() == v1beta1.Severity_SEVERITY_FATAL { + if rs.GetSeverity() == fnv1.Severity_SEVERITY_FATAL { // We won't iterate if the function returned a fatal result. return rsp, nil } @@ -77,7 +77,7 @@ func (c *FetchingFunctionRunner) RunFunction(ctx context.Context, name string, r requirements = newRequirements // Cleanup the extra resources from the previous iteration to store the new ones - req.ExtraResources = make(map[string]*v1beta1.Resources) + req.ExtraResources = make(map[string]*fnv1.Resources) // Fetch the requested resources and add them to the desired state. for name, selector := range newRequirements.GetExtraResources() { @@ -109,12 +109,12 @@ func NewExistingExtraResourcesFetcher(c client.Reader) *ExistingExtraResourcesFe } // Fetch fetches resources requested by functions using the provided client.Reader. 
-func (e *ExistingExtraResourcesFetcher) Fetch(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { +func (e *ExistingExtraResourcesFetcher) Fetch(ctx context.Context, rs *fnv1.ResourceSelector) (*fnv1.Resources, error) { if rs == nil { return nil, errors.New(errNilResourceSelector) } switch match := rs.GetMatch().(type) { - case *v1beta1.ResourceSelector_MatchName: + case *fnv1.ResourceSelector_MatchName: // Fetch a single resource. r := &kunstructured.Unstructured{} r.SetAPIVersion(rs.GetApiVersion()) @@ -133,8 +133,8 @@ func (e *ExistingExtraResourcesFetcher) Fetch(ctx context.Context, rs *v1beta1.R if err != nil { return nil, errors.Wrap(err, errExtraResourceAsStruct) } - return &v1beta1.Resources{Items: []*v1beta1.Resource{{Resource: o}}}, nil - case *v1beta1.ResourceSelector_MatchLabels: + return &fnv1.Resources{Items: []*fnv1.Resource{{Resource: o}}}, nil + case *fnv1.ResourceSelector_MatchLabels: // Fetch a list of resources. list := &kunstructured.UnstructuredList{} list.SetAPIVersion(rs.GetApiVersion()) @@ -144,16 +144,16 @@ func (e *ExistingExtraResourcesFetcher) Fetch(ctx context.Context, rs *v1beta1.R return nil, errors.Wrap(err, errListExtraResources) } - resources := make([]*v1beta1.Resource, len(list.Items)) + resources := make([]*fnv1.Resource, len(list.Items)) for i, r := range list.Items { o, err := AsStruct(&r) if err != nil { return nil, errors.Wrap(err, errExtraResourceAsStruct) } - resources[i] = &v1beta1.Resource{Resource: o} + resources[i] = &fnv1.Resource{Resource: o} } - return &v1beta1.Resources{Items: resources}, nil + return &fnv1.Resources{Items: resources}, nil } return nil, errors.New(errUnknownResourceSelector) } diff --git a/internal/controller/apiextensions/composite/extra_resources_test.go b/internal/controller/apiextensions/composite/extra_resources_test.go index 9b7a1bf11..1b955136b 100644 --- a/internal/controller/apiextensions/composite/extra_resources_test.go +++ b/internal/controller/apiextensions/composite/extra_resources_test.go @@ -32,7 +32,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/test" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" ) var _ FunctionRunner = &FetchingFunctionRunner{} @@ -41,11 +41,11 @@ func TestExistingExtraResourcesFetcherFetch(t *testing.T) { errBoom := errors.New("boom") type args struct { - rs *v1beta1.ResourceSelector + rs *fnv1.ResourceSelector c client.Reader } type want struct { - res *v1beta1.Resources + res *fnv1.Resources err error } cases := map[string]struct { @@ -56,10 +56,10 @@ func TestExistingExtraResourcesFetcherFetch(t *testing.T) { "SuccessMatchName": { reason: "We should return a valid Resources when a resource is found by name", args: args{ - rs: &v1beta1.ResourceSelector{ + rs: &fnv1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchName{ + Match: &fnv1.ResourceSelector_MatchName{ MatchName: "cool-resource", }, }, @@ -71,8 +71,8 @@ func TestExistingExtraResourcesFetcherFetch(t *testing.T) { }, }, want: want{ - res: &v1beta1.Resources{ - Items: []*v1beta1.Resource{ + res: &fnv1.Resources{ + Items: []*fnv1.Resource{ { Resource: MustStruct(map[string]any{ "apiVersion": "test.crossplane.io/v1", @@ -89,11 +89,11 @@ func TestExistingExtraResourcesFetcherFetch(t *testing.T) { "SuccessMatchLabels": { reason: "We should return a valid Resources when a resource 
is found by labels", args: args{ - rs: &v1beta1.ResourceSelector{ + rs: &fnv1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchLabels{ - MatchLabels: &v1beta1.MatchLabels{ + Match: &fnv1.ResourceSelector_MatchLabels{ + MatchLabels: &fnv1.MatchLabels{ Labels: map[string]string{ "cool": "resource", }, @@ -133,8 +133,8 @@ func TestExistingExtraResourcesFetcherFetch(t *testing.T) { }, }, want: want{ - res: &v1beta1.Resources{ - Items: []*v1beta1.Resource{ + res: &fnv1.Resources{ + Items: []*fnv1.Resource{ { Resource: MustStruct(map[string]any{ "apiVersion": "test.crossplane.io/v1", @@ -166,10 +166,10 @@ func TestExistingExtraResourcesFetcherFetch(t *testing.T) { "NotFoundMatchName": { reason: "We should return no error when a resource is not found by name", args: args{ - rs: &v1beta1.ResourceSelector{ + rs: &fnv1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchName{ + Match: &fnv1.ResourceSelector_MatchName{ MatchName: "cool-resource", }, }, @@ -186,10 +186,10 @@ func TestExistingExtraResourcesFetcherFetch(t *testing.T) { "ErrorMatchName": { reason: "We should return any other error encountered when getting a resource by name", args: args{ - rs: &v1beta1.ResourceSelector{ + rs: &fnv1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchName{ + Match: &fnv1.ResourceSelector_MatchName{ MatchName: "cool-resource", }, }, @@ -205,11 +205,11 @@ func TestExistingExtraResourcesFetcherFetch(t *testing.T) { "ErrorMatchLabels": { reason: "We should return any other error encountered when listing resources by labels", args: args{ - rs: &v1beta1.ResourceSelector{ + rs: &fnv1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchLabels{ - MatchLabels: &v1beta1.MatchLabels{ + Match: &fnv1.ResourceSelector_MatchLabels{ + MatchLabels: &fnv1.MatchLabels{ Labels: map[string]string{ "cool": "resource", }, @@ -259,10 +259,10 @@ func TestFetchingFunctionRunner(t *testing.T) { type args struct { ctx context.Context name string - req *v1beta1.RunFunctionRequest + req *fnv1.RunFunctionRequest } type want struct { - rsp *v1beta1.RunFunctionResponse + rsp *fnv1.RunFunctionResponse err error } @@ -275,7 +275,7 @@ func TestFetchingFunctionRunner(t *testing.T) { "RunFunctionError": { reason: "We should return an error if the wrapped FunctionRunner does", params: params{ - wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { return nil, errors.New("boom") }), }, @@ -287,11 +287,11 @@ func TestFetchingFunctionRunner(t *testing.T) { "FatalResult": { reason: "We should return early if the function returns a fatal result", params: params{ - wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { - rsp := &v1beta1.RunFunctionResponse{ - Results: []*v1beta1.Result{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + rsp := &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ { - Severity: v1beta1.Severity_SEVERITY_FATAL, + Severity: fnv1.Severity_SEVERITY_FATAL, }, }, } @@ -300,10 +300,10 @@ func TestFetchingFunctionRunner(t *testing.T) { }, args: args{}, 
want: want{ - rsp: &v1beta1.RunFunctionResponse{ - Results: []*v1beta1.Result{ + rsp: &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ { - Severity: v1beta1.Severity_SEVERITY_FATAL, + Severity: fnv1.Severity_SEVERITY_FATAL, }, }, }, @@ -313,11 +313,11 @@ func TestFetchingFunctionRunner(t *testing.T) { "NoRequirements": { reason: "We should return the response unchanged if there are no requirements", params: params{ - wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { - rsp := &v1beta1.RunFunctionResponse{ - Results: []*v1beta1.Result{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + rsp := &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ { - Severity: v1beta1.Severity_SEVERITY_NORMAL, + Severity: fnv1.Severity_SEVERITY_NORMAL, }, }, } @@ -326,10 +326,10 @@ func TestFetchingFunctionRunner(t *testing.T) { }, args: args{}, want: want{ - rsp: &v1beta1.RunFunctionResponse{ - Results: []*v1beta1.Result{ + rsp: &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ { - Severity: v1beta1.Severity_SEVERITY_NORMAL, + Severity: fnv1.Severity_SEVERITY_NORMAL, }, }, }, @@ -339,10 +339,10 @@ func TestFetchingFunctionRunner(t *testing.T) { "FetchResourcesError": { reason: "We should return any error encountered when fetching extra resources", params: params{ - wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { - rsp := &v1beta1.RunFunctionResponse{ - Requirements: &v1beta1.Requirements{ - ExtraResources: map[string]*v1beta1.ResourceSelector{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + rsp := &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ "gimme": { ApiVersion: "test.crossplane.io/v1", Kind: "CoolResource", @@ -352,12 +352,12 @@ func TestFetchingFunctionRunner(t *testing.T) { } return rsp, nil }), - resources: ExtraResourcesFetcherFn(func(_ context.Context, _ *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { + resources: ExtraResourcesFetcherFn(func(_ context.Context, _ *fnv1.ResourceSelector) (*fnv1.Resources, error) { return nil, errors.New("boom") }), }, args: args{ - req: &v1beta1.RunFunctionRequest{}, + req: &fnv1.RunFunctionRequest{}, }, want: want{ err: cmpopts.AnyError, @@ -366,10 +366,10 @@ func TestFetchingFunctionRunner(t *testing.T) { "RequirementsDidntStabilizeError": { reason: "We should return an error if the function's requirements never stabilize", params: params{ - wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { - rsp := &v1beta1.RunFunctionResponse{ - Requirements: &v1beta1.Requirements{ - ExtraResources: map[string]*v1beta1.ResourceSelector{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + rsp := &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ "gimme": { ApiVersion: "test.crossplane.io/v1", @@ -381,12 +381,12 @@ func TestFetchingFunctionRunner(t *testing.T) { } return rsp, nil }), - resources: ExtraResourcesFetcherFn(func(_ context.Context, _ *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { - return &v1beta1.Resources{}, nil + resources: 
ExtraResourcesFetcherFn(func(_ context.Context, _ *fnv1.ResourceSelector) (*fnv1.Resources, error) { + return &fnv1.Resources{}, nil }), }, args: args{ - req: &v1beta1.RunFunctionRequest{}, + req: &fnv1.RunFunctionRequest{}, }, want: want{ err: cmpopts.AnyError, @@ -395,14 +395,14 @@ func TestFetchingFunctionRunner(t *testing.T) { "Success": { reason: "We should return the fetched resources", params: params{ - wrapped: FunctionRunnerFn(func(_ context.Context, _ string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { // We only expect to be sent extra resources the second time // we're called, in response to our requirements. if called { - want := &v1beta1.RunFunctionRequest{ - ExtraResources: map[string]*v1beta1.Resources{ + want := &fnv1.RunFunctionRequest{ + ExtraResources: map[string]*fnv1.Resources{ "gimme": { - Items: []*v1beta1.Resource{{Resource: coolResource}}, + Items: []*fnv1.Resource{{Resource: coolResource}}, }, }, } @@ -415,9 +415,9 @@ func TestFetchingFunctionRunner(t *testing.T) { called = true - rsp := &v1beta1.RunFunctionResponse{ - Requirements: &v1beta1.Requirements{ - ExtraResources: map[string]*v1beta1.ResourceSelector{ + rsp := &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ "gimme": { ApiVersion: "test.crossplane.io/v1", Kind: "CoolResource", @@ -427,20 +427,20 @@ func TestFetchingFunctionRunner(t *testing.T) { } return rsp, nil }), - resources: ExtraResourcesFetcherFn(func(_ context.Context, _ *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { - r := &v1beta1.Resources{ - Items: []*v1beta1.Resource{{Resource: coolResource}}, + resources: ExtraResourcesFetcherFn(func(_ context.Context, _ *fnv1.ResourceSelector) (*fnv1.Resources, error) { + r := &fnv1.Resources{ + Items: []*fnv1.Resource{{Resource: coolResource}}, } return r, nil }), }, args: args{ - req: &v1beta1.RunFunctionRequest{}, + req: &fnv1.RunFunctionRequest{}, }, want: want{ - rsp: &v1beta1.RunFunctionResponse{ - Requirements: &v1beta1.Requirements{ - ExtraResources: map[string]*v1beta1.ResourceSelector{ + rsp: &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ "gimme": { ApiVersion: "test.crossplane.io/v1", Kind: "CoolResource", diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 4444acd5f..6c5ca40bc 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -585,48 +585,41 @@ func (r *Reconciler) CompositeReconcilerOptions(ctx context.Context, d *v1.Compo o = append(o, composite.WithConnectionPublishers(pc...), - composite.WithConfigurator(cc), - composite.WithComposer(composite.NewPTComposer(r.engine.GetClient(), composite.WithComposedConnectionDetailsFetcher(fetcher)))) + composite.WithConfigurator(cc)) } - // If Composition Functions are enabled we use two different Composer - // implementations. One supports P&T (aka 'Resources mode') and the other - // Functions (aka 'Pipeline mode'). - if r.options.Features.Enabled(features.EnableBetaCompositionFunctions) { - ptc := composite.NewPTComposer(r.engine.GetClient(), composite.WithComposedConnectionDetailsFetcher(fetcher)) + // This composer is used for mode: Resources Compositions (the default). 
+ ptc := composite.NewPTComposer(r.engine.GetClient(), composite.WithComposedConnectionDetailsFetcher(fetcher)) - fcopts := []composite.FunctionComposerOption{ - composite.WithComposedResourceObserver(composite.NewExistingComposedResourceObserver(r.engine.GetClient(), fetcher)), - composite.WithCompositeConnectionDetailsFetcher(fetcher), - } - - var runner composite.FunctionRunner = r.options.FunctionRunner - if r.options.Features.Enabled(features.EnableBetaCompositionFunctionsExtraResources) { - runner = composite.NewFetchingFunctionRunner(runner, composite.NewExistingExtraResourcesFetcher(r.engine.GetClient())) - } + // Wrap the PackagedFunctionRunner setup in main with support for loading + // extra resources to satisfy function requirements. + runner := composite.NewFetchingFunctionRunner(r.options.FunctionRunner, composite.NewExistingExtraResourcesFetcher(r.engine.GetClient())) - fc := composite.NewFunctionComposer(r.engine.GetClient(), runner, fcopts...) + // This composer is used for mode: Pipeline Compositions. + fc := composite.NewFunctionComposer(r.engine.GetClient(), runner, + composite.WithComposedResourceObserver(composite.NewExistingComposedResourceObserver(r.engine.GetClient(), fetcher)), + composite.WithCompositeConnectionDetailsFetcher(fetcher), + ) - // Note that if external secret stores are enabled this will supersede - // the WithComposer option specified in that block. - o = append(o, composite.WithComposer(composite.ComposerSelectorFn(func(cm *v1.CompositionMode) composite.Composer { - // Resources mode is the implicit default. - m := v1.CompositionModeResources - if cm != nil { - m = *cm - } - switch m { - case v1.CompositionModeResources: - return ptc - case v1.CompositionModePipeline: - return fc - default: - // This shouldn't be possible, but just in case return the - // default Composer. - return ptc - } - }))) - } + // We use two different Composer implementations. One supports P&T (aka + // 'Resources mode') and the other Functions (aka 'Pipeline mode'). + o = append(o, composite.WithComposer(composite.ComposerSelectorFn(func(cm *v1.CompositionMode) composite.Composer { + // Resources mode is the implicit default. + m := v1.CompositionModeResources + if cm != nil { + m = *cm + } + switch m { + case v1.CompositionModeResources: + return ptc + case v1.CompositionModePipeline: + return fc + default: + // This shouldn't be possible, but just in case return the + // default Composer. + return ptc + } + }))) // If realtime compositions are enabled we pass the ControllerEngine to the // XR reconciler so that it can start watches for composed resources. diff --git a/internal/controller/pkg/manager/reconciler.go b/internal/controller/pkg/manager/reconciler.go index 0fef6dabc..0b4e6c055 100644 --- a/internal/controller/pkg/manager/reconciler.go +++ b/internal/controller/pkg/manager/reconciler.go @@ -41,7 +41,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" v1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" "github.com/crossplane/crossplane/internal/controller/pkg/controller" "github.com/crossplane/crossplane/internal/xpkg" ) @@ -217,10 +216,10 @@ func SetupConfiguration(mgr ctrl.Manager, o controller.Options) error { // SetupFunction adds a controller that reconciles Functions. 
func SetupFunction(mgr ctrl.Manager, o controller.Options) error { - name := "packages/" + strings.ToLower(v1beta1.FunctionGroupKind) - np := func() v1.Package { return &v1beta1.Function{} } - nr := func() v1.PackageRevision { return &v1beta1.FunctionRevision{} } - nrl := func() v1.PackageRevisionList { return &v1beta1.FunctionRevisionList{} } + name := "packages/" + strings.ToLower(v1.FunctionGroupKind) + np := func() v1.Package { return &v1.Function{} } + nr := func() v1.PackageRevision { return &v1.FunctionRevision{} } + nrl := func() v1.PackageRevisionList { return &v1.FunctionRevisionList{} } cs, err := kubernetes.NewForConfig(mgr.GetConfig()) if err != nil { @@ -242,8 +241,8 @@ func SetupFunction(mgr ctrl.Manager, o controller.Options) error { return ctrl.NewControllerManagedBy(mgr). Named(name). - For(&v1beta1.Function{}). - Owns(&v1beta1.FunctionRevision{}). + For(&v1.Function{}). + Owns(&v1.FunctionRevision{}). WithOptions(o.ForControllerRuntime()). Complete(ratelimiter.NewReconciler(name, errors.WithSilentRequeueOnConflict(NewReconciler(mgr, opts...)), o.GlobalRateLimiter)) } diff --git a/internal/controller/pkg/pkg.go b/internal/controller/pkg/pkg.go index e6362997f..77cb46da0 100644 --- a/internal/controller/pkg/pkg.go +++ b/internal/controller/pkg/pkg.go @@ -24,7 +24,6 @@ import ( "github.com/crossplane/crossplane/internal/controller/pkg/manager" "github.com/crossplane/crossplane/internal/controller/pkg/resolver" "github.com/crossplane/crossplane/internal/controller/pkg/revision" - "github.com/crossplane/crossplane/internal/features" ) // Setup package controllers. @@ -32,26 +31,16 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { for _, setup := range []func(ctrl.Manager, controller.Options) error{ manager.SetupConfiguration, manager.SetupProvider, + manager.SetupFunction, resolver.Setup, revision.SetupConfigurationRevision, revision.SetupProviderRevision, + revision.SetupFunctionRevision, } { if err := setup(mgr, o); err != nil { return err } } - // We only want to start the Function controllers if Functions are enabled. 
- if o.Features.Enabled(features.EnableBetaCompositionFunctions) { - for _, setup := range []func(ctrl.Manager, controller.Options) error{ - manager.SetupFunction, - revision.SetupFunctionRevision, - } { - if err := setup(mgr, o); err != nil { - return err - } - } - } - return nil } diff --git a/internal/controller/pkg/resolver/reconciler.go b/internal/controller/pkg/resolver/reconciler.go index 50fb9e92e..c0bc15f9d 100644 --- a/internal/controller/pkg/resolver/reconciler.go +++ b/internal/controller/pkg/resolver/reconciler.go @@ -287,7 +287,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco case v1beta1.ProviderPackageType: pack = &v1.Provider{} case v1beta1.FunctionPackageType: - pack = &v1beta1.Function{} + pack = &v1.Function{} default: log.Debug(errInvalidPackageType) return reconcile.Result{Requeue: false}, nil diff --git a/internal/controller/pkg/revision/reconciler.go b/internal/controller/pkg/revision/reconciler.go index 3b59a6584..29ab28d84 100644 --- a/internal/controller/pkg/revision/reconciler.go +++ b/internal/controller/pkg/revision/reconciler.go @@ -46,7 +46,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" pkgmetav1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" - pkgmetav1beta1 "github.com/crossplane/crossplane/apis/pkg/meta/v1beta1" v1 "github.com/crossplane/crossplane/apis/pkg/v1" "github.com/crossplane/crossplane/apis/pkg/v1alpha1" "github.com/crossplane/crossplane/apis/pkg/v1beta1" @@ -381,8 +380,8 @@ func SetupConfigurationRevision(mgr ctrl.Manager, o controller.Options) error { // SetupFunctionRevision adds a controller that reconciles FunctionRevisions. func SetupFunctionRevision(mgr ctrl.Manager, o controller.Options) error { - name := "packages/" + strings.ToLower(v1beta1.FunctionRevisionGroupKind) - nr := func() v1.PackageRevision { return &v1beta1.FunctionRevision{} } + name := "packages/" + strings.ToLower(v1.FunctionRevisionGroupKind) + nr := func() v1.PackageRevision { return &v1.FunctionRevision{} } clientset, err := kubernetes.NewForConfig(mgr.GetConfig()) if err != nil { @@ -404,7 +403,7 @@ func SetupFunctionRevision(mgr ctrl.Manager, o controller.Options) error { cb := ctrl.NewControllerManagedBy(mgr). Named(name). - For(&v1beta1.FunctionRevision{}). + For(&v1.FunctionRevision{}). Owns(&appsv1.Deployment{}). Owns(&corev1.Service{}). Owns(&corev1.Secret{}). @@ -740,7 +739,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{}, err } - pkgMeta, _ := xpkg.TryConvert(pkg.GetMeta()[0], &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1beta1.Function{}) + pkgMeta, _ := xpkg.TryConvert(pkg.GetMeta()[0], &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1.Function{}) pmo := pkgMeta.(metav1.Object) //nolint:forcetypeassert // Will always be metav1.Object. 
meta.AddLabels(pr, pmo.GetLabels()) diff --git a/internal/controller/pkg/revision/runtime.go b/internal/controller/pkg/revision/runtime.go index 8b74113fd..de18952da 100644 --- a/internal/controller/pkg/revision/runtime.go +++ b/internal/controller/pkg/revision/runtime.go @@ -332,7 +332,7 @@ func (b *RuntimeManifestBuilder) packageName() string { } func (b *RuntimeManifestBuilder) packageType() string { - if _, ok := b.revision.(*v1beta1.FunctionRevision); ok { + if _, ok := b.revision.(*v1.FunctionRevision); ok { return "function" } return "provider" diff --git a/internal/controller/pkg/revision/runtime_function.go b/internal/controller/pkg/revision/runtime_function.go index 804e0203c..8eb1597c4 100644 --- a/internal/controller/pkg/revision/runtime_function.go +++ b/internal/controller/pkg/revision/runtime_function.go @@ -31,9 +31,8 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/resource" - pkgmetav1beta1 "github.com/crossplane/crossplane/apis/pkg/meta/v1beta1" + pkgmetav1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" v1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" "github.com/crossplane/crossplane/internal/initializer" "github.com/crossplane/crossplane/internal/xpkg" ) @@ -91,7 +90,7 @@ func (h *FunctionHooks) Pre(ctx context.Context, _ runtime.Object, pr v1.Package } // N.B.: We expect the revision to be applied by the caller - fRev, ok := pr.(*v1beta1.FunctionRevision) + fRev, ok := pr.(*v1.FunctionRevision) if !ok { return errors.Errorf("cannot apply function package hooks to %T", pr) } @@ -113,8 +112,8 @@ func (h *FunctionHooks) Pre(ctx context.Context, _ runtime.Object, pr v1.Package // Post performs operations meant to happen after establishing objects. func (h *FunctionHooks) Post(ctx context.Context, pkg runtime.Object, pr v1.PackageRevisionWithRuntime, build ManifestBuilder) error { - po, _ := xpkg.TryConvert(pkg, &pkgmetav1beta1.Function{}) - functionMeta, ok := po.(*pkgmetav1beta1.Function) + po, _ := xpkg.TryConvert(pkg, &pkgmetav1.Function{}) + functionMeta, ok := po.(*pkgmetav1.Function) if !ok { return errors.New(errNotFunction) } @@ -206,7 +205,7 @@ func functionServiceOverrides() []ServiceOverride { // getFunctionImage determines a complete function image, taking into account a // default registry. If the function meta specifies an image, we have a // preference for that image over what is specified in the package revision. 
-func getFunctionImage(fm *pkgmetav1beta1.Function, pr v1.PackageRevisionWithRuntime, defaultRegistry string) (string, error) { +func getFunctionImage(fm *pkgmetav1.Function, pr v1.PackageRevisionWithRuntime, defaultRegistry string) (string, error) { image := pr.GetSource() if fm.Spec.Image != nil { image = *fm.Spec.Image diff --git a/internal/controller/pkg/revision/runtime_function_test.go b/internal/controller/pkg/revision/runtime_function_test.go index 0c9f5f73f..178af2816 100644 --- a/internal/controller/pkg/revision/runtime_function_test.go +++ b/internal/controller/pkg/revision/runtime_function_test.go @@ -33,9 +33,8 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/test" - pkgmetav1beta1 "github.com/crossplane/crossplane/apis/pkg/meta/v1beta1" + pkgmetav1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" v1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" "github.com/crossplane/crossplane/internal/xpkg" ) @@ -60,11 +59,11 @@ func TestFunctionPreHook(t *testing.T) { "Success": { reason: "Successful run of pre hook.", args: args{ - pkg: &pkgmetav1beta1.Function{ - Spec: pkgmetav1beta1.FunctionSpec{}, + pkg: &pkgmetav1.Function{ + Spec: pkgmetav1.FunctionSpec{}, }, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ DesiredState: v1.PackageRevisionActive, }, @@ -98,8 +97,8 @@ func TestFunctionPreHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ DesiredState: v1.PackageRevisionActive, }, @@ -107,7 +106,7 @@ func TestFunctionPreHook(t *testing.T) { TLSServerSecretName: ptr.To("some-server-secret"), }, }, - Status: v1beta1.FunctionRevisionStatus{ + Status: v1.FunctionRevisionStatus{ Endpoint: fmt.Sprintf(serviceEndpointFmt, "some-service", "some-namespace", servicePort), }, }, @@ -157,9 +156,9 @@ func TestFunctionPostHook(t *testing.T) { "FunctionInactive": { reason: "Should do nothing if function revision is inactive.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ DesiredState: v1.PackageRevisionInactive, }, @@ -167,8 +166,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ DesiredState: v1.PackageRevisionInactive, }, @@ -179,9 +178,9 @@ func TestFunctionPostHook(t *testing.T) { "ErrApplySA": { reason: "Should return error if we fail to apply service account for active function revision.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -206,8 +205,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: 
v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -220,9 +219,9 @@ func TestFunctionPostHook(t *testing.T) { "ErrApplyDeployment": { reason: "Should return error if we fail to apply deployment for active function revision.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -250,8 +249,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -264,9 +263,9 @@ func TestFunctionPostHook(t *testing.T) { "ErrDeploymentNoAvailableConditionYet": { reason: "Should return error if deployment for active function revision has no available condition yet.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -291,8 +290,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -305,9 +304,9 @@ func TestFunctionPostHook(t *testing.T) { "ErrUnavailableDeployment": { reason: "Should return error if deployment is unavailable for function revision.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -340,8 +339,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -354,9 +353,9 @@ func TestFunctionPostHook(t *testing.T) { "Successful": { reason: "Should not return error if successfully applied service account and deployment for active function revision and the deployment is ready.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -388,8 +387,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -401,9 +400,9 @@ func TestFunctionPostHook(t *testing.T) 
{ "SuccessWithExtraSecret": { reason: "Should not return error if successfully applied service account with additional secret.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -438,8 +437,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -451,9 +450,9 @@ func TestFunctionPostHook(t *testing.T) { "SuccessfulWithExternallyManagedSA": { reason: "Should be successful without creating an SA, when the SA is managed externally", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -503,8 +502,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -636,8 +635,8 @@ func TestFunctionDeactivateHook(t *testing.T) { func TestGetFunctionImage(t *testing.T) { type args struct { - functionMeta *pkgmetav1beta1.Function - functionRevision *v1beta1.FunctionRevision + functionMeta *pkgmetav1.Function + functionRevision *v1.FunctionRevision defaultRegistry string } @@ -654,13 +653,13 @@ func TestGetFunctionImage(t *testing.T) { "NoOverrideFromMeta": { reason: "Should use the image from the package revision and add default registry when no override is present.", args: args{ - functionMeta: &pkgmetav1beta1.Function{ - Spec: pkgmetav1beta1.FunctionSpec{ + functionMeta: &pkgmetav1.Function{ + Spec: pkgmetav1.FunctionSpec{ Image: nil, }, }, - functionRevision: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + functionRevision: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: "crossplane/func-bar:v1.2.3", }, @@ -676,13 +675,13 @@ func TestGetFunctionImage(t *testing.T) { "WithOverrideFromMeta": { reason: "Should use the override from the function meta when present and add default registry.", args: args{ - functionMeta: &pkgmetav1beta1.Function{ - Spec: pkgmetav1beta1.FunctionSpec{ + functionMeta: &pkgmetav1.Function{ + Spec: pkgmetav1.FunctionSpec{ Image: ptr.To("crossplane/func-bar-server:v1.2.3"), }, }, - functionRevision: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + functionRevision: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: "crossplane/func-bar:v1.2.3", }, @@ -698,13 +697,13 @@ func TestGetFunctionImage(t *testing.T) { "RegistrySpecified": { reason: "Should honor the registry as specified on the package, even if its different than the default registry.", args: args{ - functionMeta: &pkgmetav1beta1.Function{ - Spec: pkgmetav1beta1.FunctionSpec{ + functionMeta: &pkgmetav1.Function{ + Spec: 
pkgmetav1.FunctionSpec{ Image: nil, }, }, - functionRevision: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + functionRevision: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: "registry.notdefault.io/crossplane/func-bar:v1.2.3", }, diff --git a/internal/controller/pkg/revision/runtime_test.go b/internal/controller/pkg/revision/runtime_test.go index a568215f7..731bf55bc 100644 --- a/internal/controller/pkg/revision/runtime_test.go +++ b/internal/controller/pkg/revision/runtime_test.go @@ -71,7 +71,7 @@ var ( }, } - functionRevision = &v1beta1.FunctionRevision{ + functionRevision = &v1.FunctionRevision{ TypeMeta: metav1.TypeMeta{ APIVersion: "pkg.crossplane.io/v1beta1", Kind: "FunctionRevision", @@ -82,7 +82,7 @@ var ( v1.LabelParentPackage: functionName, }, }, - Spec: v1beta1.FunctionRevisionSpec{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, }, diff --git a/internal/controller/pkg/revision/watch.go b/internal/controller/pkg/revision/watch.go index 3b9c838e7..adfd80544 100644 --- a/internal/controller/pkg/revision/watch.go +++ b/internal/controller/pkg/revision/watch.go @@ -137,7 +137,7 @@ func (e *EnqueueRequestForReferencingFunctionRevisions) add(ctx context.Context, return } - l := &v1beta1.FunctionRevisionList{} + l := &v1.FunctionRevisionList{} if err := e.client.List(ctx, l); err != nil { // TODO(hasheddan): Handle this error? return diff --git a/internal/features/features.go b/internal/features/features.go index bbb0dd342..f1514f19a 100644 --- a/internal/features/features.go +++ b/internal/features/features.go @@ -49,16 +49,6 @@ const ( // Beta Feature Flags. const ( - // EnableBetaCompositionFunctions enables alpha support for composition - // functions. See the below design for more details. - // https://github.com/crossplane/crossplane/blob/863ff6/design/design-doc-composition-functions.md - EnableBetaCompositionFunctions feature.Flag = "EnableBetaCompositionFunctions" - - // EnableBetaCompositionFunctionsExtraResources enables extra resources support for - // composition functions. See the below design for more details. - // https://github.com/crossplane/crossplane/blob/863ff6/design/design-doc-composition-functions.md - EnableBetaCompositionFunctionsExtraResources feature.Flag = "EnableBetaCompositionFunctionsExtraResources" - // EnableBetaCompositionWebhookSchemaValidation enables alpha support for // composition webhook schema validation. See the below design for more // details. 
diff --git a/internal/initializer/installer.go b/internal/initializer/installer.go index fd18584d3..2242a8d4d 100644 --- a/internal/initializer/installer.go +++ b/internal/initializer/installer.go @@ -27,7 +27,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" v1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" "github.com/crossplane/crossplane/internal/xpkg" ) @@ -90,7 +89,7 @@ func (pi *PackageInstaller) Run(ctx context.Context, kube client.Client) error { } cMap[xpkg.ParsePackageSourceFromReference(ref)] = c.GetName() } - fl := &v1beta1.FunctionList{} + fl := &v1.FunctionList{} if err := kube.List(ctx, fl); err != nil && !kerrors.IsNotFound(err) { return errors.Wrap(err, errListFunctions) } @@ -122,7 +121,7 @@ func (pi *PackageInstaller) Run(ctx context.Context, kube client.Client) error { pkgsIdx++ } for _, img := range pi.functions { - f := &v1beta1.Function{} + f := &v1.Function{} if err := buildPack(f, img, fMap); err != nil { return err } diff --git a/internal/initializer/installer_test.go b/internal/initializer/installer_test.go index e27555101..143df84c6 100644 --- a/internal/initializer/installer_test.go +++ b/internal/initializer/installer_test.go @@ -31,7 +31,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) const ( @@ -113,14 +112,14 @@ func TestInstaller(t *testing.T) { }, }, } - case *v1beta1.FunctionList: - *l = v1beta1.FunctionList{ - Items: []v1beta1.Function{ + case *v1.FunctionList: + *l = v1.FunctionList{ + Items: []v1.Function{ { ObjectMeta: metav1.ObjectMeta{ Name: f1Name, }, - Spec: v1beta1.FunctionSpec{ + Spec: v1.FunctionSpec{ PackageSpec: v1.PackageSpec{ Package: f1, }, @@ -143,7 +142,7 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf(errFmtGetConfiguration, key.Name) } - case *v1beta1.Function: + case *v1.Function: if key.Name != f1Name { t.Errorf(errFmtGetFunction, key.Name) } @@ -162,7 +161,7 @@ func TestInstaller(t *testing.T) { if obj.GetName() != c1Name { t.Errorf(errFmtPatchConfiguration, obj.GetName()) } - case *v1beta1.Function: + case *v1.Function: if obj.GetName() != f1Name { t.Errorf(errFmtPatchFunction, obj.GetName()) } @@ -212,14 +211,14 @@ func TestInstaller(t *testing.T) { }, }, } - case *v1beta1.FunctionList: - *l = v1beta1.FunctionList{ - Items: []v1beta1.Function{ + case *v1.FunctionList: + *l = v1.FunctionList{ + Items: []v1.Function{ { ObjectMeta: metav1.ObjectMeta{ Name: f1Existing, }, - Spec: v1beta1.FunctionSpec{ + Spec: v1.FunctionSpec{ PackageSpec: v1.PackageSpec{ Package: fmt.Sprintf("%s:%s", f1Repo, "v100.100.100"), }, @@ -242,7 +241,7 @@ func TestInstaller(t *testing.T) { if key.Name != c1Existing { t.Errorf(errFmtGetConfiguration, key.Name) } - case *v1beta1.Function: + case *v1.Function: if key.Name != f1Existing { t.Errorf(errFmtGetFunction, key.Name) } @@ -267,7 +266,7 @@ func TestInstaller(t *testing.T) { if o.GetSource() != c1 { t.Errorf(errFmtPatchConfigurationSource, o.GetSource()) } - case *v1beta1.Function: + case *v1.Function: if o.GetName() != f1Existing { t.Errorf(errFmtPatchFunction, o.GetName()) } @@ -301,7 +300,7 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf(errFmtGetConfiguration, key.Name) } - case *v1beta1.Function: + case *v1.Function: if key.Name != f1Name { t.Errorf(errFmtGetFunction, key.Name) } @@ -351,14 +350,14 @@ func TestInstaller(t *testing.T) { } case 
*v1.ConfigurationList: return nil - case *v1beta1.FunctionList: - *l = v1beta1.FunctionList{ - Items: []v1beta1.Function{ + case *v1.FunctionList: + *l = v1.FunctionList{ + Items: []v1.Function{ { ObjectMeta: metav1.ObjectMeta{ Name: "other-function", }, - Spec: v1beta1.FunctionSpec{ + Spec: v1.FunctionSpec{ PackageSpec: v1.PackageSpec{ Package: fmt.Sprintf("%s:%s", "other-repo", "v100.100.100"), }, @@ -368,7 +367,7 @@ func TestInstaller(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "another-function", }, - Spec: v1beta1.FunctionSpec{ + Spec: v1.FunctionSpec{ PackageSpec: v1.PackageSpec{ Package: "preloaded-source", }, @@ -391,7 +390,7 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf(errFmtGetConfiguration, key.Name) } - case *v1beta1.Function: + case *v1.Function: if key.Name != f1Name { t.Errorf(errFmtGetFunction, key.Name) } @@ -423,7 +422,7 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf("unexpected name in configuration apply") } - case *v1beta1.Function: + case *v1.Function: t.Errorf("no functions specified") default: t.Errorf("unexpected type") diff --git a/internal/xfn/function_runner.go b/internal/xfn/function_runner.go index 762c41b56..46872e27f 100644 --- a/internal/xfn/function_runner.go +++ b/internal/xfn/function_runner.go @@ -30,9 +30,8 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/logging" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) // Error strings. @@ -127,7 +126,7 @@ func NewPackagedFunctionRunner(c client.Reader, o ...PackagedFunctionRunnerOptio // RunFunction sends the supplied RunFunctionRequest to the named Function. The // function is expected to be an installed Function.pkg.crossplane.io package. 
-func (r *PackagedFunctionRunner) RunFunction(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { +func (r *PackagedFunctionRunner) RunFunction(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { conn, err := r.getClientConn(ctx, name) if err != nil { return nil, errors.Wrapf(err, errFmtGetClientConn, name) @@ -137,7 +136,7 @@ func (r *PackagedFunctionRunner) RunFunction(ctx context.Context, name string, r ctx, cancel := context.WithTimeout(ctx, runFunctionTimeout) defer cancel() - rsp, err := v1beta1.NewFunctionRunnerServiceClient(conn).RunFunction(ctx, req) + rsp, err := fnv1.NewFunctionRunnerServiceClient(conn).RunFunction(ctx, req) return rsp, errors.Wrapf(err, errFmtRunFunction, name) } @@ -167,12 +166,12 @@ func (r *PackagedFunctionRunner) RunFunction(ctx context.Context, name string, r func (r *PackagedFunctionRunner) getClientConn(ctx context.Context, name string) (*grpc.ClientConn, error) { log := r.log.WithValues("function", name) - l := &pkgv1beta1.FunctionRevisionList{} + l := &pkgv1.FunctionRevisionList{} if err := r.client.List(ctx, l, client.MatchingLabels{pkgv1.LabelParentPackage: name}); err != nil { return nil, errors.Wrapf(err, errListFunctionRevisions) } - var active *pkgv1beta1.FunctionRevision + var active *pkgv1.FunctionRevision for i := range l.Items { if l.Items[i].GetDesiredState() == pkgv1.PackageRevisionActive { active = &l.Items[i] @@ -280,7 +279,7 @@ func (r *PackagedFunctionRunner) GarbageCollectConnectionsNow(ctx context.Contex r.connsMx.Lock() defer r.connsMx.Unlock() - l := &pkgv1beta1.FunctionList{} + l := &pkgv1.FunctionList{} if err := r.client.List(ctx, l); err != nil { return 0, errors.Wrap(err, errListFunctions) } diff --git a/internal/xfn/function_runner_metrics.go b/internal/xfn/function_runner_metrics.go index 7e0dfa762..d3d0b91a7 100644 --- a/internal/xfn/function_runner_metrics.go +++ b/internal/xfn/function_runner_metrics.go @@ -23,7 +23,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/status" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" ) // Metrics are requests, errors, and duration (RED) metrics for composition @@ -97,16 +97,16 @@ func (m *Metrics) CreateInterceptor(name, pkg string) grpc.UnaryClientIntercepto // no fatal results, has severity "Warning". A response with fatal // results has severity "Fatal". l["result_severity"] = "Normal" - if rsp, ok := reply.(*v1beta1.RunFunctionResponse); ok { + if rsp, ok := reply.(*fnv1.RunFunctionResponse); ok { for _, r := range rsp.GetResults() { // Keep iterating if we see a warning result - we might still // see a fatal result. - if r.GetSeverity() == v1beta1.Severity_SEVERITY_WARNING { + if r.GetSeverity() == fnv1.Severity_SEVERITY_WARNING { l["result_severity"] = "Warning" } // Break if we see a fatal result, to ensure we don't downgrade // the severity to warning. 
- if r.GetSeverity() == v1beta1.Severity_SEVERITY_FATAL { + if r.GetSeverity() == fnv1.Severity_SEVERITY_FATAL { l["result_severity"] = "Fatal" break } diff --git a/internal/xfn/function_runner_test.go b/internal/xfn/function_runner_test.go index a2a417388..7fc4e95ef 100644 --- a/internal/xfn/function_runner_test.go +++ b/internal/xfn/function_runner_test.go @@ -31,9 +31,8 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) func TestRunFunction(t *testing.T) { @@ -49,10 +48,10 @@ func TestRunFunction(t *testing.T) { type args struct { ctx context.Context name string - req *v1beta1.RunFunctionRequest + req *fnv1.RunFunctionRequest } type want struct { - rsp *v1beta1.RunFunctionResponse + rsp *fnv1.RunFunctionResponse err error } cases := map[string]struct { @@ -81,9 +80,9 @@ func TestRunFunction(t *testing.T) { params: params{ c: &test.MockClient{ MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { - obj.(*pkgv1beta1.FunctionRevisionList).Items = []pkgv1beta1.FunctionRevision{ + obj.(*pkgv1.FunctionRevisionList).Items = []pkgv1.FunctionRevision{ { - Spec: pkgv1beta1.FunctionRevisionSpec{ + Spec: pkgv1.FunctionRevisionSpec{ PackageRevisionSpec: pkgv1.PackageRevisionSpec{ DesiredState: pkgv1.PackageRevisionInactive, // This revision is not active. }, @@ -107,17 +106,17 @@ func TestRunFunction(t *testing.T) { params: params{ c: &test.MockClient{ MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { - obj.(*pkgv1beta1.FunctionRevisionList).Items = []pkgv1beta1.FunctionRevision{ + obj.(*pkgv1.FunctionRevisionList).Items = []pkgv1.FunctionRevision{ { ObjectMeta: metav1.ObjectMeta{ Name: "cool-fn-revision-a", }, - Spec: pkgv1beta1.FunctionRevisionSpec{ + Spec: pkgv1.FunctionRevisionSpec{ PackageRevisionSpec: pkgv1.PackageRevisionSpec{ DesiredState: pkgv1.PackageRevisionActive, }, }, - Status: pkgv1beta1.FunctionRevisionStatus{ + Status: pkgv1.FunctionRevisionStatus{ Endpoint: "", // An empty endpoint. }, }, @@ -140,28 +139,28 @@ func TestRunFunction(t *testing.T) { c: &test.MockClient{ MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { // Start a gRPC server. - lis := NewGRPCServer(t, &MockFunctionServer{rsp: &v1beta1.RunFunctionResponse{ - Meta: &v1beta1.ResponseMeta{Tag: "hi!"}, + lis := NewGRPCServer(t, &MockFunctionServer{rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, }}) listeners = append(listeners, lis) - l, ok := obj.(*pkgv1beta1.FunctionRevisionList) + l, ok := obj.(*pkgv1.FunctionRevisionList) if !ok { // If we're called to list Functions we want to // return none, to make sure we GC everything. 
return nil } - l.Items = []pkgv1beta1.FunctionRevision{ + l.Items = []pkgv1.FunctionRevision{ { ObjectMeta: metav1.ObjectMeta{ Name: "cool-fn-revision-a", }, - Spec: pkgv1beta1.FunctionRevisionSpec{ + Spec: pkgv1.FunctionRevisionSpec{ PackageRevisionSpec: pkgv1.PackageRevisionSpec{ DesiredState: pkgv1.PackageRevisionActive, }, }, - Status: pkgv1beta1.FunctionRevisionStatus{ + Status: pkgv1.FunctionRevisionStatus{ Endpoint: strings.Replace(lis.Addr().String(), "127.0.0.1", "dns:///localhost", 1), }, }, @@ -173,11 +172,11 @@ func TestRunFunction(t *testing.T) { args: args{ ctx: context.Background(), name: "cool-fn", - req: &v1beta1.RunFunctionRequest{}, + req: &fnv1.RunFunctionRequest{}, }, want: want{ - rsp: &v1beta1.RunFunctionResponse{ - Meta: &v1beta1.ResponseMeta{Tag: "hi!"}, + rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, }, }, }, @@ -217,8 +216,8 @@ func TestGetClientConn(t *testing.T) { // test some cases that don't fit well in our usual table-driven format. // Start a gRPC server. - lis := NewGRPCServer(t, &MockFunctionServer{rsp: &v1beta1.RunFunctionResponse{ - Meta: &v1beta1.ResponseMeta{Tag: "hi!"}, + lis := NewGRPCServer(t, &MockFunctionServer{rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, }}) defer lis.Close() @@ -256,8 +255,8 @@ func TestGetClientConn(t *testing.T) { }) // Start another gRPC server. - lis2 := NewGRPCServer(t, &MockFunctionServer{rsp: &v1beta1.RunFunctionResponse{ - Meta: &v1beta1.ResponseMeta{Tag: "hi!"}, + lis2 := NewGRPCServer(t, &MockFunctionServer{rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, }}) defer lis2.Close() @@ -289,8 +288,8 @@ func TestGarbageCollectConnectionsNow(t *testing.T) { // table-driven format. // Start a gRPC server. - lis := NewGRPCServer(t, &MockFunctionServer{rsp: &v1beta1.RunFunctionResponse{ - Meta: &v1beta1.ResponseMeta{Tag: "hi!"}, + lis := NewGRPCServer(t, &MockFunctionServer{rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, }}) defer lis.Close() @@ -312,7 +311,7 @@ func TestGarbageCollectConnectionsNow(t *testing.T) { t.Run("FunctionStillExistsDoNotGarbageCollect", func(t *testing.T) { c.MockList = test.NewMockListFn(nil, func(obj client.ObjectList) error { - obj.(*pkgv1beta1.FunctionList).Items = []pkgv1beta1.Function{ + obj.(*pkgv1.FunctionList).Items = []pkgv1.Function{ { // This Function exists! ObjectMeta: metav1.ObjectMeta{Name: "cool-fn"}, @@ -348,23 +347,23 @@ func TestGarbageCollectConnectionsNow(t *testing.T) { func NewListFn(target string) test.MockListFn { return test.NewMockListFn(nil, func(obj client.ObjectList) error { - l, ok := obj.(*pkgv1beta1.FunctionRevisionList) + l, ok := obj.(*pkgv1.FunctionRevisionList) if !ok { // If we're called to list Functions we want to // return none, to make sure we GC everything. 
return nil } - l.Items = []pkgv1beta1.FunctionRevision{ + l.Items = []pkgv1.FunctionRevision{ { ObjectMeta: metav1.ObjectMeta{ Name: "cool-fn-revision-a", }, - Spec: pkgv1beta1.FunctionRevisionSpec{ + Spec: pkgv1.FunctionRevisionSpec{ PackageRevisionSpec: pkgv1.PackageRevisionSpec{ DesiredState: pkgv1.PackageRevisionActive, }, }, - Status: pkgv1beta1.FunctionRevisionStatus{ + Status: pkgv1.FunctionRevisionStatus{ Endpoint: target, }, }, @@ -373,7 +372,7 @@ func NewListFn(target string) test.MockListFn { }) } -func NewGRPCServer(t *testing.T, ss v1beta1.FunctionRunnerServiceServer) net.Listener { +func NewGRPCServer(t *testing.T, ss fnv1.FunctionRunnerServiceServer) net.Listener { t.Helper() // Listen on a random port. @@ -386,7 +385,7 @@ func NewGRPCServer(t *testing.T, ss v1beta1.FunctionRunnerServiceServer) net.Lis // TODO(negz): Is it worth using a WaitGroup for these? go func() { s := grpc.NewServer() - v1beta1.RegisterFunctionRunnerServiceServer(s, ss) + fnv1.RegisterFunctionRunnerServiceServer(s, ss) _ = s.Serve(lis) }() @@ -395,12 +394,12 @@ func NewGRPCServer(t *testing.T, ss v1beta1.FunctionRunnerServiceServer) net.Lis } type MockFunctionServer struct { - v1beta1.UnimplementedFunctionRunnerServiceServer + fnv1.UnimplementedFunctionRunnerServiceServer - rsp *v1beta1.RunFunctionResponse + rsp *fnv1.RunFunctionResponse err error } -func (s *MockFunctionServer) RunFunction(context.Context, *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { +func (s *MockFunctionServer) RunFunction(context.Context, *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { return s.rsp, s.err } diff --git a/internal/xpkg/lint.go b/internal/xpkg/lint.go index c8f78a1c8..40860334b 100644 --- a/internal/xpkg/lint.go +++ b/internal/xpkg/lint.go @@ -108,7 +108,7 @@ func IsFunction(o runtime.Object) error { // compatible with the package constraints. func PackageCrossplaneCompatible(v version.Operations) parser.ObjectLinterFn { return func(o runtime.Object) error { - p, ok := TryConvertToPkg(o, &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1beta1.Function{}) + p, ok := TryConvertToPkg(o, &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1.Function{}) if !ok { return errors.New(errNotMeta) } @@ -129,7 +129,7 @@ func PackageCrossplaneCompatible(v version.Operations) parser.ObjectLinterFn { // PackageValidSemver checks that the package uses valid semver ranges. 
func PackageValidSemver(o runtime.Object) error { - p, ok := TryConvertToPkg(o, &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1beta1.Function{}) + p, ok := TryConvertToPkg(o, &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1.Function{}) if !ok { return errors.New(errNotMeta) } From 78d6d1a8b8fef56bdd3a522856a2d5c82685079f Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 14 Aug 2024 17:40:45 -0700 Subject: [PATCH 340/370] Promote render and xpkg init CLI commands to GA Signed-off-by: Nic Cope --- cmd/crank/beta/beta.go | 4 -- cmd/crank/beta/validate/cmd.go | 4 +- cmd/crank/beta/xpkg/xpkg.go | 39 ------------------- cmd/crank/main.go | 4 +- cmd/crank/{beta => }/render/cmd.go | 8 ++-- cmd/crank/{beta => }/render/load.go | 0 cmd/crank/{beta => }/render/load_test.go | 0 cmd/crank/{beta => }/render/render.go | 0 cmd/crank/{beta => }/render/render_test.go | 0 cmd/crank/{beta => }/render/runtime.go | 0 .../{beta => }/render/runtime_development.go | 0 cmd/crank/{beta => }/render/runtime_docker.go | 0 .../{beta => }/render/runtime_docker_test.go | 0 .../render/testdata/composition.yaml | 0 .../render/testdata/extra-resources.yaml | 0 .../{beta => }/render/testdata/functions.yaml | 0 .../{beta => }/render/testdata/observed.yaml | 0 cmd/crank/{beta => }/render/testdata/xr.yaml | 0 cmd/crank/{beta => }/xpkg/init.go | 8 ++-- cmd/crank/{beta => }/xpkg/init_test.go | 0 cmd/crank/{beta => }/xpkg/testdata/NOTES.txt | 0 21 files changed, 13 insertions(+), 54 deletions(-) delete mode 100644 cmd/crank/beta/xpkg/xpkg.go rename cmd/crank/{beta => }/render/cmd.go (97%) rename cmd/crank/{beta => }/render/load.go (100%) rename cmd/crank/{beta => }/render/load_test.go (100%) rename cmd/crank/{beta => }/render/render.go (100%) rename cmd/crank/{beta => }/render/render_test.go (100%) rename cmd/crank/{beta => }/render/runtime.go (100%) rename cmd/crank/{beta => }/render/runtime_development.go (100%) rename cmd/crank/{beta => }/render/runtime_docker.go (100%) rename cmd/crank/{beta => }/render/runtime_docker_test.go (100%) rename cmd/crank/{beta => }/render/testdata/composition.yaml (100%) rename cmd/crank/{beta => }/render/testdata/extra-resources.yaml (100%) rename cmd/crank/{beta => }/render/testdata/functions.yaml (100%) rename cmd/crank/{beta => }/render/testdata/observed.yaml (100%) rename cmd/crank/{beta => }/render/testdata/xr.yaml (100%) rename cmd/crank/{beta => }/xpkg/init.go (97%) rename cmd/crank/{beta => }/xpkg/init_test.go (100%) rename cmd/crank/{beta => }/xpkg/testdata/NOTES.txt (100%) diff --git a/cmd/crank/beta/beta.go b/cmd/crank/beta/beta.go index 9f027f0e2..1d3540abf 100644 --- a/cmd/crank/beta/beta.go +++ b/cmd/crank/beta/beta.go @@ -21,11 +21,9 @@ package beta import ( "github.com/crossplane/crossplane/cmd/crank/beta/convert" - "github.com/crossplane/crossplane/cmd/crank/beta/render" "github.com/crossplane/crossplane/cmd/crank/beta/top" "github.com/crossplane/crossplane/cmd/crank/beta/trace" "github.com/crossplane/crossplane/cmd/crank/beta/validate" - "github.com/crossplane/crossplane/cmd/crank/beta/xpkg" ) // Cmd contains beta commands. @@ -33,10 +31,8 @@ type Cmd struct { // Subcommands and flags will appear in the CLI help output in the same // order they're specified here. Keep them in alphabetical order. 
Convert convert.Cmd `cmd:"" help:"Convert a Crossplane resource to a newer version or kind."` - Render render.Cmd `cmd:"" help:"Render a composite resource (XR)."` Top top.Cmd `cmd:"" help:"Display resource (CPU/memory) usage by Crossplane related pods."` Trace trace.Cmd `cmd:"" help:"Trace a Crossplane resource to get a detailed output of its relationships, helpful for troubleshooting."` - XPKG xpkg.Cmd `cmd:"" help:"Manage Crossplane packages."` Validate validate.Cmd `cmd:"" help:"Validate Crossplane resources."` } diff --git a/cmd/crank/beta/validate/cmd.go b/cmd/crank/beta/validate/cmd.go index 2a7a372a0..642ff7826 100644 --- a/cmd/crank/beta/validate/cmd.go +++ b/cmd/crank/beta/validate/cmd.go @@ -47,7 +47,7 @@ type Cmd struct { func (c *Cmd) Help() string { return ` This command validates the provided Crossplane resources against the schemas of the provided extensions like XRDs, -CRDs, providers, and configurations. The output of the "crossplane beta render" command can be +CRDs, providers, and configurations. The output of the "crossplane render" command can be piped to this validate command in order to rapidly validate on the outputs of the composition development experience. If providers or configurations are provided as extensions, they will be downloaded and loaded as CRDs before performing @@ -67,7 +67,7 @@ Examples: crossplane beta validate extensionsDir/ resourceDir/ --skip-success-results # Validate the output of the render command against the extensions in the extensionsDir folder - crossplane beta render xr.yaml composition.yaml func.yaml --include-full-xr | crossplane beta validate extensionsDir/ - + crossplane render xr.yaml composition.yaml func.yaml --include-full-xr | crossplane beta validate extensionsDir/ - # Validate all resources in the resourceDir folder against the extensions in the extensionsDir folder using provided # cache directory and clean the cache directory before downloading schemas diff --git a/cmd/crank/beta/xpkg/xpkg.go b/cmd/crank/beta/xpkg/xpkg.go deleted file mode 100644 index 52b3897bb..000000000 --- a/cmd/crank/beta/xpkg/xpkg.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package xpkg contains beta Crossplane packaging commands. -package xpkg - -// Cmd contains commands for interacting with packages. -type Cmd struct { - // Keep commands sorted alphabetically. - Init initCmd `cmd:"" help:"Initialize a new package from a template."` -} - -// Help prints out the help for the xpkg command. -func (c *Cmd) Help() string { - return ` -Crossplane can be extended using packages. Crossplane packages are called xpkgs. -Crossplane supports configuration, provider and function packages. - -A package is an opinionated OCI image that contains everything needed to extend -a Crossplane control plane with new functionality. For example installing a -provider package extends Crossplane with support for new kinds of managed -resource (MR). 
- -See https://docs.crossplane.io/latest/concepts/packages for more information. -` -} diff --git a/cmd/crank/main.go b/cmd/crank/main.go index f6c52dd43..ff4594f24 100644 --- a/cmd/crank/main.go +++ b/cmd/crank/main.go @@ -24,6 +24,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane/cmd/crank/beta" + "github.com/crossplane/crossplane/cmd/crank/render" "github.com/crossplane/crossplane/cmd/crank/version" "github.com/crossplane/crossplane/cmd/crank/xpkg" ) @@ -46,7 +47,8 @@ type cli struct { // order they're specified here. Keep them in alphabetical order. // Subcommands. - XPKG xpkg.Cmd `cmd:"" help:"Manage Crossplane packages."` + XPKG xpkg.Cmd `cmd:"" help:"Manage Crossplane packages."` + Render render.Cmd `cmd:"" help:"Render a composite resource (XR)."` // The alpha and beta subcommands are intentionally in a separate block. We // want them to appear after all other subcommands. diff --git a/cmd/crank/beta/render/cmd.go b/cmd/crank/render/cmd.go similarity index 97% rename from cmd/crank/beta/render/cmd.go rename to cmd/crank/render/cmd.go index c11e51c80..ee50633bc 100644 --- a/cmd/crank/beta/render/cmd.go +++ b/cmd/crank/render/cmd.go @@ -96,18 +96,18 @@ to the Docker daemon. Examples: # Simulate creating a new XR. - crossplane beta render xr.yaml composition.yaml functions.yaml + crossplane render xr.yaml composition.yaml functions.yaml # Simulate updating an XR that already exists. - crossplane beta render xr.yaml composition.yaml functions.yaml \ + crossplane render xr.yaml composition.yaml functions.yaml \ --observed-resources=existing-observed-resources.yaml # Pass context values to the Function pipeline. - crossplane beta render xr.yaml composition.yaml functions.yaml \ + crossplane render xr.yaml composition.yaml functions.yaml \ --context-values=apiextensions.crossplane.io/environment='{"key": "value"}' # Pass extra resources Functions in the pipeline can request. 
- crossplane beta render xr.yaml composition.yaml functions.yaml \ + crossplane render xr.yaml composition.yaml functions.yaml \ --extra-resources=extra-resources.yaml ` } diff --git a/cmd/crank/beta/render/load.go b/cmd/crank/render/load.go similarity index 100% rename from cmd/crank/beta/render/load.go rename to cmd/crank/render/load.go diff --git a/cmd/crank/beta/render/load_test.go b/cmd/crank/render/load_test.go similarity index 100% rename from cmd/crank/beta/render/load_test.go rename to cmd/crank/render/load_test.go diff --git a/cmd/crank/beta/render/render.go b/cmd/crank/render/render.go similarity index 100% rename from cmd/crank/beta/render/render.go rename to cmd/crank/render/render.go diff --git a/cmd/crank/beta/render/render_test.go b/cmd/crank/render/render_test.go similarity index 100% rename from cmd/crank/beta/render/render_test.go rename to cmd/crank/render/render_test.go diff --git a/cmd/crank/beta/render/runtime.go b/cmd/crank/render/runtime.go similarity index 100% rename from cmd/crank/beta/render/runtime.go rename to cmd/crank/render/runtime.go diff --git a/cmd/crank/beta/render/runtime_development.go b/cmd/crank/render/runtime_development.go similarity index 100% rename from cmd/crank/beta/render/runtime_development.go rename to cmd/crank/render/runtime_development.go diff --git a/cmd/crank/beta/render/runtime_docker.go b/cmd/crank/render/runtime_docker.go similarity index 100% rename from cmd/crank/beta/render/runtime_docker.go rename to cmd/crank/render/runtime_docker.go diff --git a/cmd/crank/beta/render/runtime_docker_test.go b/cmd/crank/render/runtime_docker_test.go similarity index 100% rename from cmd/crank/beta/render/runtime_docker_test.go rename to cmd/crank/render/runtime_docker_test.go diff --git a/cmd/crank/beta/render/testdata/composition.yaml b/cmd/crank/render/testdata/composition.yaml similarity index 100% rename from cmd/crank/beta/render/testdata/composition.yaml rename to cmd/crank/render/testdata/composition.yaml diff --git a/cmd/crank/beta/render/testdata/extra-resources.yaml b/cmd/crank/render/testdata/extra-resources.yaml similarity index 100% rename from cmd/crank/beta/render/testdata/extra-resources.yaml rename to cmd/crank/render/testdata/extra-resources.yaml diff --git a/cmd/crank/beta/render/testdata/functions.yaml b/cmd/crank/render/testdata/functions.yaml similarity index 100% rename from cmd/crank/beta/render/testdata/functions.yaml rename to cmd/crank/render/testdata/functions.yaml diff --git a/cmd/crank/beta/render/testdata/observed.yaml b/cmd/crank/render/testdata/observed.yaml similarity index 100% rename from cmd/crank/beta/render/testdata/observed.yaml rename to cmd/crank/render/testdata/observed.yaml diff --git a/cmd/crank/beta/render/testdata/xr.yaml b/cmd/crank/render/testdata/xr.yaml similarity index 100% rename from cmd/crank/beta/render/testdata/xr.yaml rename to cmd/crank/render/testdata/xr.yaml diff --git a/cmd/crank/beta/xpkg/init.go b/cmd/crank/xpkg/init.go similarity index 97% rename from cmd/crank/beta/xpkg/init.go rename to cmd/crank/xpkg/init.go index f82b7cb49..09a88245d 100644 --- a/cmd/crank/beta/xpkg/init.go +++ b/cmd/crank/xpkg/init.go @@ -84,14 +84,14 @@ script without prompting. Examples: # Initialize a new Go Composition Function named function-example. - crossplane beta xpkg init function-example function-template-go - + crossplane xpkg init function-example function-template-go + # Initialize a new Provider named provider-example from a custom template. 
- crossplane beta xpkg init provider-example https://github.com/crossplane/provider-template-custom + crossplane xpkg init provider-example https://github.com/crossplane/provider-template-custom # Initialize a new Go Composition Function named function-example and run # its init.sh script (if it exists) without prompting the user or displaying its contents. - crossplane beta xpkg init function-example function-template-go --run-init-script + crossplane xpkg init function-example function-template-go --run-init-script ` b := strings.Builder{} diff --git a/cmd/crank/beta/xpkg/init_test.go b/cmd/crank/xpkg/init_test.go similarity index 100% rename from cmd/crank/beta/xpkg/init_test.go rename to cmd/crank/xpkg/init_test.go diff --git a/cmd/crank/beta/xpkg/testdata/NOTES.txt b/cmd/crank/xpkg/testdata/NOTES.txt similarity index 100% rename from cmd/crank/beta/xpkg/testdata/NOTES.txt rename to cmd/crank/xpkg/testdata/NOTES.txt From fd8b00197f97a79415c66ec990788ae9d696d209 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 15 Aug 2024 18:30:19 -0700 Subject: [PATCH 341/370] Update storage versions for Functions and FunctionRevisions to v1 Signed-off-by: Nic Cope --- cmd/crossplane/core/init.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/crossplane/core/init.go b/cmd/crossplane/core/init.go index f694ada56..52a784bc3 100644 --- a/cmd/crossplane/core/init.go +++ b/cmd/crossplane/core/init.go @@ -74,6 +74,8 @@ func (c *initCommand) Run(s *runtime.Scheme, log logging.Logger) error { initializer.NewTLSCertificateGenerator(c.Namespace, c.TLSCASecretName, tlsGeneratorOpts...), initializer.NewCoreCRDsMigrator("compositionrevisions.apiextensions.crossplane.io", "v1alpha1"), initializer.NewCoreCRDsMigrator("locks.pkg.crossplane.io", "v1alpha1"), + initializer.NewCoreCRDsMigrator("functions.pkg.crossplane.io", "v1beta1"), + initializer.NewCoreCRDsMigrator("functionrevisions.pkg.crossplane.io", "v1beta1"), ) if c.WebhookEnabled { nn := types.NamespacedName{ From b37968e7616f24037ba9f4490daf6da53a8f3a58 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Wed, 14 Aug 2024 23:16:11 -0700 Subject: [PATCH 342/370] Fall back to v1beta1 if a function doesn't implement v1 We want to promote the v1beta1 function proto to v1. This isn't a breaking change. There's actually no changes - we just want to signal that it's reached v1. Crossplane (the client) must continue to support v1beta1 functions (i.e. servers) in order to remain compatible with existing functions before they update to SDKs that use the v1 protobuf. To do this, Crossplane will first attempt to send a v1 RPC. If the server responds that v1 is unimplemented, it'll fall back to v1beta1. The v1beta1 protos are now automatically replicated from v1. So all changes made to v1 will be propagated to v1beta1. This means Crossplane can work entirely with v1 types internally. When it needs to fall back to send a v1beta1 RunFunctionRequest, it converts a v1 request by serializing it to bytes, then deserializing it into a v1beta1 request. As far as I can tell this is the best way to convert a Go protobuf message to an identical message of a different type. 
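A rough sketch of the fallback and conversion described above (illustrative only: the
helper and package names here are made up, and this is not the exact code that lands
in internal/xfn/function_runner.go):

    package xfnsketch // hypothetical package, for illustration only

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
        "google.golang.org/protobuf/proto"

        fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1"
        fnv1beta1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1"
    )

    // runWithFallback (hypothetical helper) tries the v1 RunFunction RPC first.
    // If the Function server reports that v1 is unimplemented, it converts the
    // request to v1beta1 by round-tripping it through the wire format - the two
    // protos are wire compatible, since v1beta1 is generated from v1 - and retries.
    func runWithFallback(ctx context.Context, conn *grpc.ClientConn, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) {
        rsp, err := fnv1.NewFunctionRunnerServiceClient(conn).RunFunction(ctx, req)
        if status.Code(err) != codes.Unimplemented {
            // Success, or a failure unrelated to the server not serving v1.
            return rsp, err
        }

        // Serialize the v1 request and deserialize it into the v1beta1 type.
        b, err := proto.Marshal(req)
        if err != nil {
            return nil, err
        }
        breq := &fnv1beta1.RunFunctionRequest{}
        if err := proto.Unmarshal(b, breq); err != nil {
            return nil, err
        }

        brsp, err := fnv1beta1.NewFunctionRunnerServiceClient(conn).RunFunction(ctx, breq)
        if err != nil {
            return nil, err
        }

        // Convert the v1beta1 response back to v1 the same way.
        b, err = proto.Marshal(brsp)
        if err != nil {
            return nil, err
        }
        out := &fnv1.RunFunctionResponse{}
        return out, proto.Unmarshal(b, out)
    }
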
Signed-off-by: Nic Cope --- .../v1beta1/zz_generated_run_function.pb.go | 1854 +++++++++++++++++ .../v1beta1/zz_generated_run_function.proto | 328 +++ .../zz_generated_run_function_grpc.pb.go | 128 ++ ...zz_generated.composition_revision_types.go | 32 +- apis/generate.go | 4 + .../pkg/meta/v1beta1/zz_generated.deepcopy.go | 30 + cmd/crank/render/render.go | 5 +- hack/duplicate_proto_type.sh | 30 + internal/xfn/function_runner.go | 71 +- internal/xfn/function_runner_test.go | 82 + 10 files changed, 2549 insertions(+), 15 deletions(-) create mode 100644 apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.pb.go create mode 100644 apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto create mode 100644 apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function_grpc.pb.go create mode 100755 hack/duplicate_proto_type.sh diff --git a/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.pb.go new file mode 100644 index 000000000..d7a926f03 --- /dev/null +++ b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.pb.go @@ -0,0 +1,1854 @@ +// +//Copyright 2022 The Crossplane Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc (unknown) +// source: apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto + +// Generated from apiextensions/fn/proto/v1/run_function.proto by ../hack/duplicate_proto_type.sh. DO NOT EDIT. + +package v1beta1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Ready indicates whether a composed resource should be considered ready. +type Ready int32 + +const ( + Ready_READY_UNSPECIFIED Ready = 0 + // True means the composed resource has been observed to be ready. + Ready_READY_TRUE Ready = 1 + // False means the composed resource has not been observed to be ready. + Ready_READY_FALSE Ready = 2 +) + +// Enum value maps for Ready. 
+var ( + Ready_name = map[int32]string{ + 0: "READY_UNSPECIFIED", + 1: "READY_TRUE", + 2: "READY_FALSE", + } + Ready_value = map[string]int32{ + "READY_UNSPECIFIED": 0, + "READY_TRUE": 1, + "READY_FALSE": 2, + } +) + +func (x Ready) Enum() *Ready { + p := new(Ready) + *p = x + return p +} + +func (x Ready) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ready) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[0].Descriptor() +} + +func (Ready) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[0] +} + +func (x Ready) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Ready.Descriptor instead. +func (Ready) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{0} +} + +// Severity of Function results. +type Severity int32 + +const ( + Severity_SEVERITY_UNSPECIFIED Severity = 0 + // Fatal results are fatal; subsequent Composition Functions may run, but + // the Composition Function pipeline run will be considered a failure and + // the first fatal result will be returned as an error. + Severity_SEVERITY_FATAL Severity = 1 + // Warning results are non-fatal; the entire Composition will run to + // completion but warning events and debug logs associated with the + // composite resource will be emitted. + Severity_SEVERITY_WARNING Severity = 2 + // Normal results are emitted as normal events and debug logs associated + // with the composite resource. + Severity_SEVERITY_NORMAL Severity = 3 +) + +// Enum value maps for Severity. +var ( + Severity_name = map[int32]string{ + 0: "SEVERITY_UNSPECIFIED", + 1: "SEVERITY_FATAL", + 2: "SEVERITY_WARNING", + 3: "SEVERITY_NORMAL", + } + Severity_value = map[string]int32{ + "SEVERITY_UNSPECIFIED": 0, + "SEVERITY_FATAL": 1, + "SEVERITY_WARNING": 2, + "SEVERITY_NORMAL": 3, + } +) + +func (x Severity) Enum() *Severity { + p := new(Severity) + *p = x + return p +} + +func (x Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Severity) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[1].Descriptor() +} + +func (Severity) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[1] +} + +func (x Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Severity.Descriptor instead. +func (Severity) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{1} +} + +// Target of Function results and conditions. +type Target int32 + +const ( + // If the target is unspecified, the result targets the composite resource. + Target_TARGET_UNSPECIFIED Target = 0 + // Target the composite resource. Results that target the composite resource + // should include detailed, advanced information. + Target_TARGET_COMPOSITE Target = 1 + // Target the composite and the claim. Results that target the composite and + // the claim should include only end-user friendly information. + Target_TARGET_COMPOSITE_AND_CLAIM Target = 2 +) + +// Enum value maps for Target. 
+var ( + Target_name = map[int32]string{ + 0: "TARGET_UNSPECIFIED", + 1: "TARGET_COMPOSITE", + 2: "TARGET_COMPOSITE_AND_CLAIM", + } + Target_value = map[string]int32{ + "TARGET_UNSPECIFIED": 0, + "TARGET_COMPOSITE": 1, + "TARGET_COMPOSITE_AND_CLAIM": 2, + } +) + +func (x Target) Enum() *Target { + p := new(Target) + *p = x + return p +} + +func (x Target) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Target) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[2].Descriptor() +} + +func (Target) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[2] +} + +func (x Target) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Target.Descriptor instead. +func (Target) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{2} +} + +type Status int32 + +const ( + Status_STATUS_CONDITION_UNSPECIFIED Status = 0 + Status_STATUS_CONDITION_UNKNOWN Status = 1 + Status_STATUS_CONDITION_TRUE Status = 2 + Status_STATUS_CONDITION_FALSE Status = 3 +) + +// Enum value maps for Status. +var ( + Status_name = map[int32]string{ + 0: "STATUS_CONDITION_UNSPECIFIED", + 1: "STATUS_CONDITION_UNKNOWN", + 2: "STATUS_CONDITION_TRUE", + 3: "STATUS_CONDITION_FALSE", + } + Status_value = map[string]int32{ + "STATUS_CONDITION_UNSPECIFIED": 0, + "STATUS_CONDITION_UNKNOWN": 1, + "STATUS_CONDITION_TRUE": 2, + "STATUS_CONDITION_FALSE": 3, + } +) + +func (x Status) Enum() *Status { + p := new(Status) + *p = x + return p +} + +func (x Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Status) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[3].Descriptor() +} + +func (Status) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[3] +} + +func (x Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status.Descriptor instead. +func (Status) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{3} +} + +// A RunFunctionRequest requests that the Composition Function be run. +type RunFunctionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Metadata pertaining to this request. + Meta *RequestMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + // The observed state prior to invocation of a Function pipeline. State passed + // to each Function is fresh as of the time the pipeline was invoked, not as + // of the time each Function was invoked. + Observed *State `protobuf:"bytes,2,opt,name=observed,proto3" json:"observed,omitempty"` + // Desired state according to a Function pipeline. The state passed to a + // particular Function may have been accumulated by previous Functions in the + // pipeline. + // + // Note that the desired state must be a partial object with only the fields + // that this function (and its predecessors in the pipeline) wants to have + // set in the object. Copying a non-partial observed state to desired is most + // likely not what you want to do. 
Leaving out fields that had been returned + // as desired before will result in them being deleted from the objects in the + // cluster. + Desired *State `protobuf:"bytes,3,opt,name=desired,proto3" json:"desired,omitempty"` + // Optional input specific to this Function invocation. A JSON representation + // of the 'input' block of the relevant entry in a Composition's pipeline. + Input *structpb.Struct `protobuf:"bytes,4,opt,name=input,proto3,oneof" json:"input,omitempty"` + // Optional context. Crossplane may pass arbitary contextual information to a + // Function. A Function may also return context in its RunFunctionResponse, + // and that context will be passed to subsequent Functions. Crossplane + // discards all context returned by the last Function in the pipeline. + Context *structpb.Struct `protobuf:"bytes,5,opt,name=context,proto3,oneof" json:"context,omitempty"` + // Optional extra resources that the Function required. + // Note that extra resources is a map to Resources, plural. + // The map key corresponds to the key in a RunFunctionResponse's + // extra_resources field. If a Function requested extra resources that + // did not exist, Crossplane sets the map key to an empty Resources message to + // indicate that it attempted to satisfy the request. + ExtraResources map[string]*Resources `protobuf:"bytes,6,rep,name=extra_resources,json=extraResources,proto3" json:"extra_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional credentials that this Function may use to communicate with an + // external system. + Credentials map[string]*Credentials `protobuf:"bytes,7,rep,name=credentials,proto3" json:"credentials,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RunFunctionRequest) Reset() { + *x = RunFunctionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunFunctionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunFunctionRequest) ProtoMessage() {} + +func (x *RunFunctionRequest) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunFunctionRequest.ProtoReflect.Descriptor instead. 
+func (*RunFunctionRequest) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{0} +} + +func (x *RunFunctionRequest) GetMeta() *RequestMeta { + if x != nil { + return x.Meta + } + return nil +} + +func (x *RunFunctionRequest) GetObserved() *State { + if x != nil { + return x.Observed + } + return nil +} + +func (x *RunFunctionRequest) GetDesired() *State { + if x != nil { + return x.Desired + } + return nil +} + +func (x *RunFunctionRequest) GetInput() *structpb.Struct { + if x != nil { + return x.Input + } + return nil +} + +func (x *RunFunctionRequest) GetContext() *structpb.Struct { + if x != nil { + return x.Context + } + return nil +} + +func (x *RunFunctionRequest) GetExtraResources() map[string]*Resources { + if x != nil { + return x.ExtraResources + } + return nil +} + +func (x *RunFunctionRequest) GetCredentials() map[string]*Credentials { + if x != nil { + return x.Credentials + } + return nil +} + +// Credentials that a Function may use to communicate with an external system. +type Credentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Source of the credentials. + // + // Types that are assignable to Source: + // + // *Credentials_CredentialData + Source isCredentials_Source `protobuf_oneof:"source"` +} + +func (x *Credentials) Reset() { + *x = Credentials{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Credentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Credentials) ProtoMessage() {} + +func (x *Credentials) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Credentials.ProtoReflect.Descriptor instead. +func (*Credentials) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{1} +} + +func (m *Credentials) GetSource() isCredentials_Source { + if m != nil { + return m.Source + } + return nil +} + +func (x *Credentials) GetCredentialData() *CredentialData { + if x, ok := x.GetSource().(*Credentials_CredentialData); ok { + return x.CredentialData + } + return nil +} + +type isCredentials_Source interface { + isCredentials_Source() +} + +type Credentials_CredentialData struct { + // Credential data loaded by Crossplane, for example from a Secret. + CredentialData *CredentialData `protobuf:"bytes,1,opt,name=credential_data,json=credentialData,proto3,oneof"` +} + +func (*Credentials_CredentialData) isCredentials_Source() {} + +// CredentialData loaded by Crossplane, for example from a Secret. 
+type CredentialData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data map[string][]byte `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CredentialData) Reset() { + *x = CredentialData{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CredentialData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialData) ProtoMessage() {} + +func (x *CredentialData) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CredentialData.ProtoReflect.Descriptor instead. +func (*CredentialData) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{2} +} + +func (x *CredentialData) GetData() map[string][]byte { + if x != nil { + return x.Data + } + return nil +} + +// Resources represents the state of several Crossplane resources. +type Resources struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Items []*Resource `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` +} + +func (x *Resources) Reset() { + *x = Resources{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resources) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resources) ProtoMessage() {} + +func (x *Resources) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resources.ProtoReflect.Descriptor instead. +func (*Resources) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{3} +} + +func (x *Resources) GetItems() []*Resource { + if x != nil { + return x.Items + } + return nil +} + +// A RunFunctionResponse contains the result of a Composition Function run. +type RunFunctionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Metadata pertaining to this response. + Meta *ResponseMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + // Desired state according to a Function pipeline. Functions may add desired + // state, and may mutate or delete any part of the desired state they are + // concerned with. A Function must pass through any part of the desired state + // that it is not concerned with. 
+ // + // Note that the desired state must be a partial object with only the fields + // that this function (and its predecessors in the pipeline) wants to have + // set in the object. Copying a non-partial observed state to desired is most + // likely not what you want to do. Leaving out fields that had been returned + // as desired before will result in them being deleted from the objects in the + // cluster. + Desired *State `protobuf:"bytes,2,opt,name=desired,proto3" json:"desired,omitempty"` + // Results of the Function run. Results are used for observability purposes. + Results []*Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` + // Optional context to be passed to the next Function in the pipeline as part + // of the RunFunctionRequest. Dropped on the last function in the pipeline. + Context *structpb.Struct `protobuf:"bytes,4,opt,name=context,proto3,oneof" json:"context,omitempty"` + // Requirements that must be satisfied for this Function to run successfully. + Requirements *Requirements `protobuf:"bytes,5,opt,name=requirements,proto3" json:"requirements,omitempty"` + // Status conditions to be applied to the composite resource. Conditions may also + // optionally be applied to the composite resource's associated claim. + Conditions []*Condition `protobuf:"bytes,6,rep,name=conditions,proto3" json:"conditions,omitempty"` +} + +func (x *RunFunctionResponse) Reset() { + *x = RunFunctionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunFunctionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunFunctionResponse) ProtoMessage() {} + +func (x *RunFunctionResponse) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunFunctionResponse.ProtoReflect.Descriptor instead. +func (*RunFunctionResponse) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{4} +} + +func (x *RunFunctionResponse) GetMeta() *ResponseMeta { + if x != nil { + return x.Meta + } + return nil +} + +func (x *RunFunctionResponse) GetDesired() *State { + if x != nil { + return x.Desired + } + return nil +} + +func (x *RunFunctionResponse) GetResults() []*Result { + if x != nil { + return x.Results + } + return nil +} + +func (x *RunFunctionResponse) GetContext() *structpb.Struct { + if x != nil { + return x.Context + } + return nil +} + +func (x *RunFunctionResponse) GetRequirements() *Requirements { + if x != nil { + return x.Requirements + } + return nil +} + +func (x *RunFunctionResponse) GetConditions() []*Condition { + if x != nil { + return x.Conditions + } + return nil +} + +// RequestMeta contains metadata pertaining to a RunFunctionRequest. +type RequestMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string identifying the content of the request. Two identical + // requests should have the same tag. 
+ Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` +} + +func (x *RequestMeta) Reset() { + *x = RequestMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestMeta) ProtoMessage() {} + +func (x *RequestMeta) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestMeta.ProtoReflect.Descriptor instead. +func (*RequestMeta) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{5} +} + +func (x *RequestMeta) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +// Requirements that must be satisfied for a Function to run successfully. +type Requirements struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Extra resources that this Function requires. + // The map key uniquely identifies the group of resources. + ExtraResources map[string]*ResourceSelector `protobuf:"bytes,1,rep,name=extra_resources,json=extraResources,proto3" json:"extra_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Requirements) Reset() { + *x = Requirements{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Requirements) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Requirements) ProtoMessage() {} + +func (x *Requirements) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Requirements.ProtoReflect.Descriptor instead. +func (*Requirements) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{6} +} + +func (x *Requirements) GetExtraResources() map[string]*ResourceSelector { + if x != nil { + return x.ExtraResources + } + return nil +} + +// ResourceSelector selects a group of resources, either by name or by label. +type ResourceSelector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // API version of resources to select. + ApiVersion string `protobuf:"bytes,1,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"` + // Kind of resources to select. + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + // Resources to match. 
+ // + // Types that are assignable to Match: + // + // *ResourceSelector_MatchName + // *ResourceSelector_MatchLabels + Match isResourceSelector_Match `protobuf_oneof:"match"` +} + +func (x *ResourceSelector) Reset() { + *x = ResourceSelector{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceSelector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceSelector) ProtoMessage() {} + +func (x *ResourceSelector) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceSelector.ProtoReflect.Descriptor instead. +func (*ResourceSelector) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{7} +} + +func (x *ResourceSelector) GetApiVersion() string { + if x != nil { + return x.ApiVersion + } + return "" +} + +func (x *ResourceSelector) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (m *ResourceSelector) GetMatch() isResourceSelector_Match { + if m != nil { + return m.Match + } + return nil +} + +func (x *ResourceSelector) GetMatchName() string { + if x, ok := x.GetMatch().(*ResourceSelector_MatchName); ok { + return x.MatchName + } + return "" +} + +func (x *ResourceSelector) GetMatchLabels() *MatchLabels { + if x, ok := x.GetMatch().(*ResourceSelector_MatchLabels); ok { + return x.MatchLabels + } + return nil +} + +type isResourceSelector_Match interface { + isResourceSelector_Match() +} + +type ResourceSelector_MatchName struct { + // Match the resource with this name. + MatchName string `protobuf:"bytes,3,opt,name=match_name,json=matchName,proto3,oneof"` +} + +type ResourceSelector_MatchLabels struct { + // Match all resources with these labels. + MatchLabels *MatchLabels `protobuf:"bytes,4,opt,name=match_labels,json=matchLabels,proto3,oneof"` +} + +func (*ResourceSelector_MatchName) isResourceSelector_Match() {} + +func (*ResourceSelector_MatchLabels) isResourceSelector_Match() {} + +// MatchLabels defines a set of labels to match resources against. 
+type MatchLabels struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Labels map[string]string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MatchLabels) Reset() { + *x = MatchLabels{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MatchLabels) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchLabels) ProtoMessage() {} + +func (x *MatchLabels) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchLabels.ProtoReflect.Descriptor instead. +func (*MatchLabels) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{8} +} + +func (x *MatchLabels) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// ResponseMeta contains metadata pertaining to a RunFunctionResponse. +type ResponseMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string identifying the content of the request. Must match the + // meta.tag of the corresponding RunFunctionRequest. + Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` + // Time-to-live of this response. Deterministic Functions with no side-effects + // (e.g. simple templating Functions) may specify a TTL. Crossplane may choose + // to cache responses until the TTL expires. + Ttl *durationpb.Duration `protobuf:"bytes,2,opt,name=ttl,proto3,oneof" json:"ttl,omitempty"` +} + +func (x *ResponseMeta) Reset() { + *x = ResponseMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseMeta) ProtoMessage() {} + +func (x *ResponseMeta) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseMeta.ProtoReflect.Descriptor instead. +func (*ResponseMeta) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{9} +} + +func (x *ResponseMeta) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *ResponseMeta) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +// State of the composite resource (XR) and any composed resources. 
+type State struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The state of the composite resource (XR). + Composite *Resource `protobuf:"bytes,1,opt,name=composite,proto3" json:"composite,omitempty"` + // The state of any composed resources. + Resources map[string]*Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *State) Reset() { + *x = State{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *State) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*State) ProtoMessage() {} + +func (x *State) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use State.ProtoReflect.Descriptor instead. +func (*State) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{10} +} + +func (x *State) GetComposite() *Resource { + if x != nil { + return x.Composite + } + return nil +} + +func (x *State) GetResources() map[string]*Resource { + if x != nil { + return x.Resources + } + return nil +} + +// A Resource represents the state of a composite or composed resource. +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The JSON representation of the resource. + // + // - Crossplane will set this field in a RunFunctionRequest to the entire + // observed state of a resource - including its metadata, spec, and status. + // + // - A Function should set this field in a RunFunctionRequest to communicate + // the desired state of a composite or composed resource. + // + // - A Function may only specify the desired status of a composite resource - + // not its metadata or spec. A Function should not return desired metadata + // or spec for a composite resource. This will be ignored. + // + // - A Function may not specify the desired status of a composed resource - + // only its metadata and spec. A Function should not return desired status + // for a composed resource. This will be ignored. + Resource *structpb.Struct `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // The resource's connection details. + // + // - Crossplane will set this field in a RunFunctionRequest to communicate the + // the observed connection details of a composite or composed resource. + // + // - A Function should set this field in a RunFunctionResponse to indicate the + // desired connection details of the composite resource. + // + // - A Function should not set this field in a RunFunctionResponse to indicate + // the desired connection details of a composed resource. This will be + // ignored. 
+ ConnectionDetails map[string][]byte `protobuf:"bytes,2,rep,name=connection_details,json=connectionDetails,proto3" json:"connection_details,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Ready indicates whether the resource should be considered ready. + // + // * Crossplane will never set this field in a RunFunctionRequest. + // + // - A Function should set this field to READY_TRUE in a RunFunctionResponse + // to indicate that a desired composed resource is ready. + // + // - A Function should not set this field in a RunFunctionResponse to indicate + // that the desired composite resource is ready. This will be ignored. + Ready Ready `protobuf:"varint,3,opt,name=ready,proto3,enum=apiextensions.fn.proto.v1beta1.Ready" json:"ready,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. +func (*Resource) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{11} +} + +func (x *Resource) GetResource() *structpb.Struct { + if x != nil { + return x.Resource + } + return nil +} + +func (x *Resource) GetConnectionDetails() map[string][]byte { + if x != nil { + return x.ConnectionDetails + } + return nil +} + +func (x *Resource) GetReady() Ready { + if x != nil { + return x.Ready + } + return Ready_READY_UNSPECIFIED +} + +// A Result of running a Function. +type Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Severity of this result. + Severity Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=apiextensions.fn.proto.v1beta1.Severity" json:"severity,omitempty"` + // Human-readable details about the result. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Optional PascalCase, machine-readable reason for this result. If omitted, + // the value will be ComposeResources. + Reason *string `protobuf:"bytes,3,opt,name=reason,proto3,oneof" json:"reason,omitempty"` + // The resources this result targets. 
+ Target *Target `protobuf:"varint,4,opt,name=target,proto3,enum=apiextensions.fn.proto.v1beta1.Target,oneof" json:"target,omitempty"` +} + +func (x *Result) Reset() { + *x = Result{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Result) ProtoMessage() {} + +func (x *Result) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Result.ProtoReflect.Descriptor instead. +func (*Result) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{12} +} + +func (x *Result) GetSeverity() Severity { + if x != nil { + return x.Severity + } + return Severity_SEVERITY_UNSPECIFIED +} + +func (x *Result) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Result) GetReason() string { + if x != nil && x.Reason != nil { + return *x.Reason + } + return "" +} + +func (x *Result) GetTarget() Target { + if x != nil && x.Target != nil { + return *x.Target + } + return Target_TARGET_UNSPECIFIED +} + +// Status condition to be applied to the composite resource. Condition may also +// optionally be applied to the composite resource's associated claim. For +// detailed information on proper usage of status conditions, please see +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties. +type Condition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of condition in PascalCase. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Status of the condition. + Status Status `protobuf:"varint,2,opt,name=status,proto3,enum=apiextensions.fn.proto.v1beta1.Status" json:"status,omitempty"` + // Reason contains a programmatic identifier indicating the reason for the + // condition's last transition. Producers of specific condition types may + // define expected values and meanings for this field, and whether the values + // are considered a guaranteed API. The value should be a PascalCase string. + // This field may not be empty. + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // Message is a human readable message indicating details about the + // transition. This may be an empty string. + Message *string `protobuf:"bytes,4,opt,name=message,proto3,oneof" json:"message,omitempty"` + // The resources this condition targets. 
+ Target *Target `protobuf:"varint,5,opt,name=target,proto3,enum=apiextensions.fn.proto.v1beta1.Target,oneof" json:"target,omitempty"` +} + +func (x *Condition) Reset() { + *x = Condition{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Condition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Condition) ProtoMessage() {} + +func (x *Condition) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Condition.ProtoReflect.Descriptor instead. +func (*Condition) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{13} +} + +func (x *Condition) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Condition) GetStatus() Status { + if x != nil { + return x.Status + } + return Status_STATUS_CONDITION_UNSPECIFIED +} + +func (x *Condition) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *Condition) GetMessage() string { + if x != nil && x.Message != nil { + return *x.Message + } + return "" +} + +func (x *Condition) GetTarget() Target { + if x != nil && x.Target != nil { + return *x.Target + } + return Target_TARGET_UNSPECIFIED +} + +var File_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto protoreflect.FileDescriptor + +var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDesc = []byte{ + 0x0a, 0x3e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x66, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2f, 0x7a, 0x7a, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x75, + 0x6e, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x1e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, + 0x06, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, + 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x74, 
0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x07, 0x64, 0x65, 0x73, + 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x05, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x36, + 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, 0x12, 0x6f, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x46, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x65, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x61, + 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, + 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x6c, + 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x6b, 0x0a, 0x10, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x41, 0x0a, 
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, + 0x72, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x59, + 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x42, 0x08, 0x0a, 0x06, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x22, 0x97, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4c, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x44, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, + 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x05, 0x69, 0x74, + 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xbb, 0x03, 0x0a, 0x13, 0x52, + 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x40, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, + 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 
0x61, 0x74, 0x65, 0x52, 0x07, 0x64, 0x65, + 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, 0x12, + 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x49, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0a, 0x0a, 0x08, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x1f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x22, 0xee, 0x01, 0x0a, 0x0c, 0x52, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x69, 0x0a, 0x0f, 0x65, 0x78, + 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x73, 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc3, 0x01, 
0x0a, 0x10, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, + 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x12, 0x4f, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x37, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5a, 0x0a, 0x0c, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, + 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x30, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x88, 0x01, 0x01, + 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x74, 0x74, 0x6c, 0x22, 0x8b, 0x02, 0x0a, 0x05, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x09, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, + 0x74, 
0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x66, + 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x6e, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52, 0x05, + 0x72, 0x65, 0x61, 0x64, 0x79, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe0, 0x01, 0x0a, 0x06, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x44, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, + 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x88, 0x01, 0x01, 0x12, 0x43, 
0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x01, 0x52, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xf2, + 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x43, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, + 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, + 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x2a, 0x3f, 0x0a, 0x05, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x15, 0x0a, 0x11, + 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x54, 0x52, 0x55, + 0x45, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x46, 0x41, 0x4c, + 0x53, 0x45, 0x10, 0x02, 0x2a, 0x63, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, + 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x45, + 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x49, + 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x03, 0x2a, 0x56, 0x0a, 0x06, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x45, 0x10, + 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, + 0x4f, 0x53, 0x49, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 
0x5f, 0x43, 0x4c, 0x41, 0x49, 0x4d, 0x10, + 0x02, 0x2a, 0x7f, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x20, 0x0a, 0x1c, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, + 0x18, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x54, 0x52, 0x55, 0x45, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, + 0x10, 0x03, 0x32, 0x91, 0x01, 0x0a, 0x15, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x78, 0x0a, 0x0b, + 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x61, 0x70, + 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x33, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x6e, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescOnce sync.Once + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescData = file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDesc +) + +func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP() []byte { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescOnce.Do(func() { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescData = protoimpl.X.CompressGZIP(file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescData) + }) + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescData +} + +var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_goTypes = []interface{}{ + (Ready)(0), // 0: apiextensions.fn.proto.v1beta1.Ready + (Severity)(0), // 1: apiextensions.fn.proto.v1beta1.Severity + (Target)(0), // 2: apiextensions.fn.proto.v1beta1.Target + (Status)(0), // 3: apiextensions.fn.proto.v1beta1.Status 
+ (*RunFunctionRequest)(nil), // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest + (*Credentials)(nil), // 5: apiextensions.fn.proto.v1beta1.Credentials + (*CredentialData)(nil), // 6: apiextensions.fn.proto.v1beta1.CredentialData + (*Resources)(nil), // 7: apiextensions.fn.proto.v1beta1.Resources + (*RunFunctionResponse)(nil), // 8: apiextensions.fn.proto.v1beta1.RunFunctionResponse + (*RequestMeta)(nil), // 9: apiextensions.fn.proto.v1beta1.RequestMeta + (*Requirements)(nil), // 10: apiextensions.fn.proto.v1beta1.Requirements + (*ResourceSelector)(nil), // 11: apiextensions.fn.proto.v1beta1.ResourceSelector + (*MatchLabels)(nil), // 12: apiextensions.fn.proto.v1beta1.MatchLabels + (*ResponseMeta)(nil), // 13: apiextensions.fn.proto.v1beta1.ResponseMeta + (*State)(nil), // 14: apiextensions.fn.proto.v1beta1.State + (*Resource)(nil), // 15: apiextensions.fn.proto.v1beta1.Resource + (*Result)(nil), // 16: apiextensions.fn.proto.v1beta1.Result + (*Condition)(nil), // 17: apiextensions.fn.proto.v1beta1.Condition + nil, // 18: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry + nil, // 19: apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry + nil, // 20: apiextensions.fn.proto.v1beta1.CredentialData.DataEntry + nil, // 21: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry + nil, // 22: apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry + nil, // 23: apiextensions.fn.proto.v1beta1.State.ResourcesEntry + nil, // 24: apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + (*structpb.Struct)(nil), // 25: google.protobuf.Struct + (*durationpb.Duration)(nil), // 26: google.protobuf.Duration +} +var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_depIdxs = []int32{ + 9, // 0: apiextensions.fn.proto.v1beta1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1beta1.RequestMeta + 14, // 1: apiextensions.fn.proto.v1beta1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1beta1.State + 14, // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1beta1.State + 25, // 3: apiextensions.fn.proto.v1beta1.RunFunctionRequest.input:type_name -> google.protobuf.Struct + 25, // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest.context:type_name -> google.protobuf.Struct + 18, // 5: apiextensions.fn.proto.v1beta1.RunFunctionRequest.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry + 19, // 6: apiextensions.fn.proto.v1beta1.RunFunctionRequest.credentials:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry + 6, // 7: apiextensions.fn.proto.v1beta1.Credentials.credential_data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData + 20, // 8: apiextensions.fn.proto.v1beta1.CredentialData.data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData.DataEntry + 15, // 9: apiextensions.fn.proto.v1beta1.Resources.items:type_name -> apiextensions.fn.proto.v1beta1.Resource + 13, // 10: apiextensions.fn.proto.v1beta1.RunFunctionResponse.meta:type_name -> apiextensions.fn.proto.v1beta1.ResponseMeta + 14, // 11: apiextensions.fn.proto.v1beta1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1beta1.State + 16, // 12: apiextensions.fn.proto.v1beta1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1beta1.Result + 25, // 13: apiextensions.fn.proto.v1beta1.RunFunctionResponse.context:type_name -> google.protobuf.Struct + 10, // 14: 
apiextensions.fn.proto.v1beta1.RunFunctionResponse.requirements:type_name -> apiextensions.fn.proto.v1beta1.Requirements + 17, // 15: apiextensions.fn.proto.v1beta1.RunFunctionResponse.conditions:type_name -> apiextensions.fn.proto.v1beta1.Condition + 21, // 16: apiextensions.fn.proto.v1beta1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry + 12, // 17: apiextensions.fn.proto.v1beta1.ResourceSelector.match_labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels + 22, // 18: apiextensions.fn.proto.v1beta1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry + 26, // 19: apiextensions.fn.proto.v1beta1.ResponseMeta.ttl:type_name -> google.protobuf.Duration + 15, // 20: apiextensions.fn.proto.v1beta1.State.composite:type_name -> apiextensions.fn.proto.v1beta1.Resource + 23, // 21: apiextensions.fn.proto.v1beta1.State.resources:type_name -> apiextensions.fn.proto.v1beta1.State.ResourcesEntry + 25, // 22: apiextensions.fn.proto.v1beta1.Resource.resource:type_name -> google.protobuf.Struct + 24, // 23: apiextensions.fn.proto.v1beta1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + 0, // 24: apiextensions.fn.proto.v1beta1.Resource.ready:type_name -> apiextensions.fn.proto.v1beta1.Ready + 1, // 25: apiextensions.fn.proto.v1beta1.Result.severity:type_name -> apiextensions.fn.proto.v1beta1.Severity + 2, // 26: apiextensions.fn.proto.v1beta1.Result.target:type_name -> apiextensions.fn.proto.v1beta1.Target + 3, // 27: apiextensions.fn.proto.v1beta1.Condition.status:type_name -> apiextensions.fn.proto.v1beta1.Status + 2, // 28: apiextensions.fn.proto.v1beta1.Condition.target:type_name -> apiextensions.fn.proto.v1beta1.Target + 7, // 29: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resources + 5, // 30: apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Credentials + 11, // 31: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.ResourceSelector + 15, // 32: apiextensions.fn.proto.v1beta1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resource + 4, // 33: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1beta1.RunFunctionRequest + 8, // 34: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1beta1.RunFunctionResponse + 34, // [34:35] is the sub-list for method output_type + 33, // [33:34] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name +} + +func init() { file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() } +func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() { + if File_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunFunctionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Credentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CredentialData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resources); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunFunctionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Requirements); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceSelector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MatchLabels); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*State); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Condition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Credentials_CredentialData)(nil), + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*ResourceSelector_MatchName)(nil), + (*ResourceSelector_MatchLabels)(nil), + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDesc, + NumEnums: 4, + NumMessages: 21, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_goTypes, + DependencyIndexes: file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_depIdxs, + EnumInfos: file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes, + MessageInfos: file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes, + }.Build() + File_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto = out.File + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDesc = nil + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_goTypes = nil + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_depIdxs = nil +} diff --git a/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto new file mode 100644 index 000000000..52ab9266c --- /dev/null +++ b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto @@ -0,0 +1,328 @@ +/* +Copyright 2022 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +syntax = "proto3"; + +import "google/protobuf/struct.proto"; +import "google/protobuf/duration.proto"; + +// Generated from apiextensions/fn/proto/v1/run_function.proto by ../hack/duplicate_proto_type.sh. DO NOT EDIT. 
+
+package apiextensions.fn.proto.v1beta1;
+
+option go_package = "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1";
+
+// A FunctionRunnerService is a Composition Function.
+service FunctionRunnerService {
+  // RunFunction runs the Composition Function.
+  rpc RunFunction(RunFunctionRequest) returns (RunFunctionResponse) {}
+}
+
+// A RunFunctionRequest requests that the Composition Function be run.
+message RunFunctionRequest {
+  // Metadata pertaining to this request.
+  RequestMeta meta = 1;
+
+  // The observed state prior to invocation of a Function pipeline. State passed
+  // to each Function is fresh as of the time the pipeline was invoked, not as
+  // of the time each Function was invoked.
+  State observed = 2;
+
+  // Desired state according to a Function pipeline. The state passed to a
+  // particular Function may have been accumulated by previous Functions in the
+  // pipeline.
+  //
+  // Note that the desired state must be a partial object with only the fields
+  // that this function (and its predecessors in the pipeline) wants to have
+  // set in the object. Copying a non-partial observed state to desired is most
+  // likely not what you want to do. Leaving out fields that had been returned
+  // as desired before will result in them being deleted from the objects in the
+  // cluster.
+  State desired = 3;
+
+  // Optional input specific to this Function invocation. A JSON representation
+  // of the 'input' block of the relevant entry in a Composition's pipeline.
+  optional google.protobuf.Struct input = 4;
+
+  // Optional context. Crossplane may pass arbitrary contextual information to a
+  // Function. A Function may also return context in its RunFunctionResponse,
+  // and that context will be passed to subsequent Functions. Crossplane
+  // discards all context returned by the last Function in the pipeline.
+  optional google.protobuf.Struct context = 5;
+
+  // Optional extra resources that the Function required.
+  // Note that extra resources is a map to Resources, plural.
+  // The map key corresponds to the key in a RunFunctionResponse's
+  // extra_resources field. If a Function requested extra resources that
+  // did not exist, Crossplane sets the map key to an empty Resources message to
+  // indicate that it attempted to satisfy the request.
+  map<string, Resources> extra_resources = 6;
+
+  // Optional credentials that this Function may use to communicate with an
+  // external system.
+  map<string, Credentials> credentials = 7;
+}
+
+// Credentials that a Function may use to communicate with an external system.
+message Credentials {
+  // Source of the credentials.
+  oneof source {
+    // Credential data loaded by Crossplane, for example from a Secret.
+    CredentialData credential_data = 1;
+  }
+}
+
+// CredentialData loaded by Crossplane, for example from a Secret.
+message CredentialData {
+  map<string, bytes> data = 1;
+}
+
+// Resources represents the state of several Crossplane resources.
+message Resources {
+  repeated Resource items = 1;
+}
+
+// A RunFunctionResponse contains the result of a Composition Function run.
+message RunFunctionResponse {
+  // Metadata pertaining to this response.
+  ResponseMeta meta = 1;
+
+  // Desired state according to a Function pipeline. Functions may add desired
+  // state, and may mutate or delete any part of the desired state they are
+  // concerned with. A Function must pass through any part of the desired state
+  // that it is not concerned with.
+  //
+  //
+  // Note that the desired state must be a partial object with only the fields
+  // that this function (and its predecessors in the pipeline) wants to have
+  // set in the object. Copying a non-partial observed state to desired is most
+  // likely not what you want to do. Leaving out fields that had been returned
+  // as desired before will result in them being deleted from the objects in the
+  // cluster.
+  State desired = 2;
+
+  // Results of the Function run. Results are used for observability purposes.
+  repeated Result results = 3;
+
+  // Optional context to be passed to the next Function in the pipeline as part
+  // of the RunFunctionRequest. Dropped on the last function in the pipeline.
+  optional google.protobuf.Struct context = 4;
+
+  // Requirements that must be satisfied for this Function to run successfully.
+  Requirements requirements = 5;
+
+  // Status conditions to be applied to the composite resource. Conditions may also
+  // optionally be applied to the composite resource's associated claim.
+  repeated Condition conditions = 6;
+}
+
+// RequestMeta contains metadata pertaining to a RunFunctionRequest.
+message RequestMeta {
+  // An opaque string identifying the content of the request. Two identical
+  // requests should have the same tag.
+  string tag = 1;
+}
+
+// Requirements that must be satisfied for a Function to run successfully.
+message Requirements {
+  // Extra resources that this Function requires.
+  // The map key uniquely identifies the group of resources.
+  map<string, ResourceSelector> extra_resources = 1;
+}
+
+// ResourceSelector selects a group of resources, either by name or by label.
+message ResourceSelector {
+  // API version of resources to select.
+  string api_version = 1;
+
+  // Kind of resources to select.
+  string kind = 2;
+
+  // Resources to match.
+  oneof match {
+    // Match the resource with this name.
+    string match_name = 3;
+
+    // Match all resources with these labels.
+    MatchLabels match_labels = 4;
+  }
+}
+
+// MatchLabels defines a set of labels to match resources against.
+message MatchLabels {
+  map<string, string> labels = 1;
+}
+
+// ResponseMeta contains metadata pertaining to a RunFunctionResponse.
+message ResponseMeta {
+  // An opaque string identifying the content of the request. Must match the
+  // meta.tag of the corresponding RunFunctionRequest.
+  string tag = 1;
+
+  // Time-to-live of this response. Deterministic Functions with no side-effects
+  // (e.g. simple templating Functions) may specify a TTL. Crossplane may choose
+  // to cache responses until the TTL expires.
+  optional google.protobuf.Duration ttl = 2;
+}
+
+// State of the composite resource (XR) and any composed resources.
+message State {
+  // The state of the composite resource (XR).
+  Resource composite = 1;
+
+  // The state of any composed resources.
+  map<string, Resource> resources = 2;
+}
+
+// A Resource represents the state of a composite or composed resource.
+message Resource {
+  // The JSON representation of the resource.
+  //
+  // * Crossplane will set this field in a RunFunctionRequest to the entire
+  //   observed state of a resource - including its metadata, spec, and status.
+  //
+  // * A Function should set this field in a RunFunctionResponse to communicate
+  //   the desired state of a composite or composed resource.
+  //
+  // * A Function may only specify the desired status of a composite resource -
+  //   not its metadata or spec. A Function should not return desired metadata
+  //   or spec for a composite resource. This will be ignored.
+  //
+  // * A Function may not specify the desired status of a composed resource -
+  //   only its metadata and spec. A Function should not return desired status
+  //   for a composed resource. This will be ignored.
+  google.protobuf.Struct resource = 1;
+
+  // The resource's connection details.
+  //
+  // * Crossplane will set this field in a RunFunctionRequest to communicate
+  //   the observed connection details of a composite or composed resource.
+  //
+  // * A Function should set this field in a RunFunctionResponse to indicate the
+  //   desired connection details of the composite resource.
+  //
+  // * A Function should not set this field in a RunFunctionResponse to indicate
+  //   the desired connection details of a composed resource. This will be
+  //   ignored.
+  map<string, bytes> connection_details = 2;
+
+  // Ready indicates whether the resource should be considered ready.
+  //
+  // * Crossplane will never set this field in a RunFunctionRequest.
+  //
+  // * A Function should set this field to READY_TRUE in a RunFunctionResponse
+  //   to indicate that a desired composed resource is ready.
+  //
+  // * A Function should not set this field in a RunFunctionResponse to indicate
+  //   that the desired composite resource is ready. This will be ignored.
+  Ready ready = 3;
+}
+
+// Ready indicates whether a composed resource should be considered ready.
+enum Ready {
+  READY_UNSPECIFIED = 0;
+
+  // True means the composed resource has been observed to be ready.
+  READY_TRUE = 1;
+
+  // False means the composed resource has not been observed to be ready.
+  READY_FALSE = 2;
+}
+
+// A Result of running a Function.
+message Result {
+  // Severity of this result.
+  Severity severity = 1;
+
+  // Human-readable details about the result.
+  string message = 2;
+
+  // Optional PascalCase, machine-readable reason for this result. If omitted,
+  // the value will be ComposeResources.
+  optional string reason = 3;
+
+  // The resources this result targets.
+  optional Target target = 4;
+}
+
+// Severity of Function results.
+enum Severity {
+  SEVERITY_UNSPECIFIED = 0;
+
+  // Fatal results are fatal; subsequent Composition Functions may run, but
+  // the Composition Function pipeline run will be considered a failure and
+  // the first fatal result will be returned as an error.
+  SEVERITY_FATAL = 1;
+
+  // Warning results are non-fatal; the entire Composition will run to
+  // completion but warning events and debug logs associated with the
+  // composite resource will be emitted.
+  SEVERITY_WARNING = 2;
+
+  // Normal results are emitted as normal events and debug logs associated
+  // with the composite resource.
+  SEVERITY_NORMAL = 3;
+}
+
+// Target of Function results and conditions.
+enum Target {
+  // If the target is unspecified, the result targets the composite resource.
+  TARGET_UNSPECIFIED = 0;
+
+  // Target the composite resource. Results that target the composite resource
+  // should include detailed, advanced information.
+  TARGET_COMPOSITE = 1;
+
+  // Target the composite and the claim. Results that target the composite and
+  // the claim should include only end-user friendly information.
+  TARGET_COMPOSITE_AND_CLAIM = 2;
+}
+
+// Status condition to be applied to the composite resource. Condition may also
+// optionally be applied to the composite resource's associated claim. For
+// detailed information on proper usage of status conditions, please see
+// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties.
+message Condition { + // Type of condition in PascalCase. + string type = 1; + + // Status of the condition. + Status status = 2; + + // Reason contains a programmatic identifier indicating the reason for the + // condition's last transition. Producers of specific condition types may + // define expected values and meanings for this field, and whether the values + // are considered a guaranteed API. The value should be a PascalCase string. + // This field may not be empty. + string reason = 3; + + // Message is a human readable message indicating details about the + // transition. This may be an empty string. + optional string message = 4; + + // The resources this condition targets. + optional Target target = 5; +} + +enum Status { + STATUS_CONDITION_UNSPECIFIED = 0; + + STATUS_CONDITION_UNKNOWN = 1; + + STATUS_CONDITION_TRUE = 2; + + STATUS_CONDITION_FALSE = 3; +} diff --git a/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function_grpc.pb.go b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function_grpc.pb.go new file mode 100644 index 000000000..ec3da1b32 --- /dev/null +++ b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function_grpc.pb.go @@ -0,0 +1,128 @@ +// +//Copyright 2022 The Crossplane Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto + +// Generated from apiextensions/fn/proto/v1/run_function.proto by ../hack/duplicate_proto_type.sh. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + FunctionRunnerService_RunFunction_FullMethodName = "/apiextensions.fn.proto.v1beta1.FunctionRunnerService/RunFunction" +) + +// FunctionRunnerServiceClient is the client API for FunctionRunnerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type FunctionRunnerServiceClient interface { + // RunFunction runs the Composition Function. 
+ RunFunction(ctx context.Context, in *RunFunctionRequest, opts ...grpc.CallOption) (*RunFunctionResponse, error) +} + +type functionRunnerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewFunctionRunnerServiceClient(cc grpc.ClientConnInterface) FunctionRunnerServiceClient { + return &functionRunnerServiceClient{cc} +} + +func (c *functionRunnerServiceClient) RunFunction(ctx context.Context, in *RunFunctionRequest, opts ...grpc.CallOption) (*RunFunctionResponse, error) { + out := new(RunFunctionResponse) + err := c.cc.Invoke(ctx, FunctionRunnerService_RunFunction_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FunctionRunnerServiceServer is the server API for FunctionRunnerService service. +// All implementations must embed UnimplementedFunctionRunnerServiceServer +// for forward compatibility +type FunctionRunnerServiceServer interface { + // RunFunction runs the Composition Function. + RunFunction(context.Context, *RunFunctionRequest) (*RunFunctionResponse, error) + mustEmbedUnimplementedFunctionRunnerServiceServer() +} + +// UnimplementedFunctionRunnerServiceServer must be embedded to have forward compatible implementations. +type UnimplementedFunctionRunnerServiceServer struct { +} + +func (UnimplementedFunctionRunnerServiceServer) RunFunction(context.Context, *RunFunctionRequest) (*RunFunctionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RunFunction not implemented") +} +func (UnimplementedFunctionRunnerServiceServer) mustEmbedUnimplementedFunctionRunnerServiceServer() {} + +// UnsafeFunctionRunnerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to FunctionRunnerServiceServer will +// result in compilation errors. +type UnsafeFunctionRunnerServiceServer interface { + mustEmbedUnimplementedFunctionRunnerServiceServer() +} + +func RegisterFunctionRunnerServiceServer(s grpc.ServiceRegistrar, srv FunctionRunnerServiceServer) { + s.RegisterService(&FunctionRunnerService_ServiceDesc, srv) +} + +func _FunctionRunnerService_RunFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunFunctionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FunctionRunnerServiceServer).RunFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: FunctionRunnerService_RunFunction_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FunctionRunnerServiceServer).RunFunction(ctx, req.(*RunFunctionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// FunctionRunnerService_ServiceDesc is the grpc.ServiceDesc for FunctionRunnerService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var FunctionRunnerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "apiextensions.fn.proto.v1beta1.FunctionRunnerService", + HandlerType: (*FunctionRunnerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "RunFunction", + Handler: _FunctionRunnerService_RunFunction_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto", +} diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go index b126ab016..680575832 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go @@ -45,16 +45,19 @@ type CompositionRevisionSpec struct { // Mode controls what type or "mode" of Composition will be used. // - // "Resources" (the default) indicates that a Composition uses what is - // commonly referred to as "Patch & Transform" or P&T composition. This mode - // of Composition uses an array of resources, each a template for a composed - // resource. + // "Pipeline" indicates that a Composition specifies a pipeline of + // Composition Functions, each of which is responsible for producing + // composed resources that Crossplane should create or update. + // + // "Resources" indicates that a Composition uses what is commonly referred + // to as "Patch & Transform" or P&T composition. This mode of Composition + // uses an array of resources, each a template for a composed resource. + // + // All Compositions should use Pipeline mode. Resources mode is deprecated. + // Resources mode won't be removed in Crossplane 1.x, and will remain the + // default to avoid breaking legacy Compositions. However, it's no longer + // accepting new features, and only accepting security related bug fixes. // - // "Pipeline" indicates that a Composition specifies a pipeline - // of Composition Functions, each of which is responsible for producing - // composed resources that Crossplane should create or update. THE PIPELINE - // MODE IS A BETA FEATURE. It is not honored if the relevant Crossplane - // feature flag is disabled. // +optional // +kubebuilder:validation:Enum=Resources;Pipeline // +kubebuilder:default=Resources @@ -66,6 +69,9 @@ type CompositionRevisionSpec struct { // // PatchSets are only used by the "Resources" mode of Composition. They // are ignored by other modes. + // + // Deprecated: Use Composition Functions instead. + // // +optional PatchSets []PatchSet `json:"patchSets,omitempty"` @@ -82,6 +88,9 @@ type CompositionRevisionSpec struct { // // Resources are only used by the "Resources" mode of Composition. They are // ignored by other modes. + // + // Deprecated: Use Composition Functions instead. + // // +optional Resources []ComposedTemplate `json:"resources,omitempty"` @@ -91,10 +100,9 @@ type CompositionRevisionSpec struct { // // The Pipeline is only used by the "Pipeline" mode of Composition. It is // ignored by other modes. - // - // THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - // feature flag is disabled. 
// +optional + // +listType=map + // +listMapKey=step Pipeline []PipelineStep `json:"pipeline,omitempty"` // WriteConnectionSecretsToNamespace specifies the namespace in which the diff --git a/apis/generate.go b/apis/generate.go index c06d53324..4d3690710 100644 --- a/apis/generate.go +++ b/apis/generate.go @@ -65,6 +65,10 @@ limitations under the License. //go:generate go run -tags generate github.com/jmattheis/goverter/cmd/goverter gen -build-tags="" ./pkg/meta/v1alpha1 //go:generate go run -tags generate github.com/jmattheis/goverter/cmd/goverter gen -build-tags="" ./pkg/meta/v1beta1 +// Replicate identical gRPC APIs + +//go:generate ../hack/duplicate_proto_type.sh apiextensions/fn/proto/v1/run_function.proto apiextensions/fn/proto/v1beta1 + // Generate gRPC types and stubs. // // We use buf rather than the traditional protoc because it's pure go and can diff --git a/apis/pkg/meta/v1beta1/zz_generated.deepcopy.go b/apis/pkg/meta/v1beta1/zz_generated.deepcopy.go index 3411301bd..7f2ad383c 100644 --- a/apis/pkg/meta/v1beta1/zz_generated.deepcopy.go +++ b/apis/pkg/meta/v1beta1/zz_generated.deepcopy.go @@ -116,6 +116,36 @@ func (in *FunctionSpec) DeepCopy() *FunctionSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeneratedFromHubConverter) DeepCopyInto(out *GeneratedFromHubConverter) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeneratedFromHubConverter. +func (in *GeneratedFromHubConverter) DeepCopy() *GeneratedFromHubConverter { + if in == nil { + return nil + } + out := new(GeneratedFromHubConverter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeneratedToHubConverter) DeepCopyInto(out *GeneratedToHubConverter) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeneratedToHubConverter. +func (in *GeneratedToHubConverter) DeepCopy() *GeneratedToHubConverter { + if in == nil { + return nil + } + out := new(GeneratedToHubConverter) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MetaSpec) DeepCopyInto(out *MetaSpec) { *out = *in diff --git a/cmd/crank/render/render.go b/cmd/crank/render/render.go index 9169faedc..9e8accda3 100644 --- a/cmd/crank/render/render.go +++ b/cmd/crank/render/render.go @@ -43,6 +43,7 @@ import ( apiextensionsv1 "github.com/crossplane/crossplane/apis/apiextensions/v1" pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" "github.com/crossplane/crossplane/internal/controller/apiextensions/composite" + "github.com/crossplane/crossplane/internal/xfn" ) // Wait for the server to be ready before sending RPCs. 
Notably this gives @@ -50,7 +51,7 @@ import ( // https://grpc.io/docs/guides/wait-for-ready/ const waitForReady = `{ "methodConfig":[{ - "name": [{"service": "apiextensions.fn.proto.v1beta1.FunctionRunnerService"}], + "name": [{}], "waitForReady": true }] }` @@ -136,7 +137,7 @@ func (r *RuntimeFunctionRunner) RunFunction(ctx context.Context, name string, re return nil, errors.Errorf("unknown Function %q - does it exist in your Functions file?", name) } - return fnv1.NewFunctionRunnerServiceClient(conn).RunFunction(ctx, req) + return xfn.NewBetaFallBackFunctionRunnerServiceClient(conn).RunFunction(ctx, req) } // Stop all of the runner's runtimes, and close its gRPC connections. diff --git a/hack/duplicate_proto_type.sh b/hack/duplicate_proto_type.sh new file mode 100755 index 000000000..013e1a26a --- /dev/null +++ b/hack/duplicate_proto_type.sh @@ -0,0 +1,30 @@ +#!/bin/sh + +# Usage example: +# +# ./duplicate_proto_type.sh apiextensions/fn/proto/v1/run_function.proto apiextensions/fn/proto/v1beta1 +# +# The above command will create zz_generated.run_function.proto in the v1beta1 +# directory. The most specific segment of the package name is assumed to be the +# same as the target directory (i.e. v1beta1). + +set -e + +FROM_PATH=${1} +TO_DIR=${2} + +DO_NOT_EDIT="// Generated from ${FROM_PATH} by ${0}. DO NOT EDIT." + +FROM_DIR=$(dirname ${FROM_PATH}) +FROM_FILE=$(basename ${FROM_PATH}) +FROM_PACKAGE=$(basename ${FROM_DIR}) + +TO_PACKAGE=$(basename ${TO_DIR}) +TO_PATH="${TO_DIR}/zz_generated_${FROM_FILE}" + +sed -r \ + -e "s#^package (.+)\.${FROM_PACKAGE};\$#${DO_NOT_EDIT}\n\npackage \1.${TO_PACKAGE};#" \ + -e "s#^option go_package = \"(.+)/${FROM_PACKAGE}\";\$#option go_package = \"\1/${TO_PACKAGE}\";#" \ + ${FROM_PATH} > ${TO_PATH} + +echo "Duplicated ${FROM_PATH} (package ${FROM_PACKAGE}) to ${TO_PATH} (package ${TO_PACKAGE})." diff --git a/internal/xfn/function_runner.go b/internal/xfn/function_runner.go index 46872e27f..b722da7fc 100644 --- a/internal/xfn/function_runner.go +++ b/internal/xfn/function_runner.go @@ -24,13 +24,17 @@ import ( "github.com/pkg/errors" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane/crossplane-runtime/pkg/logging" fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" + fnv1beta1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" ) @@ -136,7 +140,7 @@ func (r *PackagedFunctionRunner) RunFunction(ctx context.Context, name string, r ctx, cancel := context.WithTimeout(ctx, runFunctionTimeout) defer cancel() - rsp, err := fnv1.NewFunctionRunnerServiceClient(conn).RunFunction(ctx, req) + rsp, err := NewBetaFallBackFunctionRunnerServiceClient(conn).RunFunction(ctx, req) return rsp, errors.Wrapf(err, errFmtRunFunction, name) } @@ -306,3 +310,68 @@ func (r *PackagedFunctionRunner) GarbageCollectConnectionsNow(ctx context.Contex return closed, nil } + +// A BetaFallBackFunctionRunnerServiceClient tries to send a v1 RPC. If the +// server reports that v1 is unimplemented, it falls back to sending a v1beta1 +// RPC. It translates the v1 RunFunctionRequest to v1beta1 by round-tripping it +// through protobuf encoding. This works because the two messages are guaranteed +// to be identical - the v1beta1 proto is replicated from the v1 proto. 
+type BetaFallBackFunctionRunnerServiceClient struct { + cc *grpc.ClientConn +} + +// NewBetaFallBackFunctionRunnerServiceClient returns a client that falls back +// to v1beta1 when v1 is unimplemented. +func NewBetaFallBackFunctionRunnerServiceClient(cc *grpc.ClientConn) *BetaFallBackFunctionRunnerServiceClient { + return &BetaFallBackFunctionRunnerServiceClient{cc: cc} +} + +// RunFunction tries to send a v1 RunFunctionRequest. It falls back to v1beta1 +// if the v1 service is unimplemented. +func (c *BetaFallBackFunctionRunnerServiceClient) RunFunction(ctx context.Context, req *fnv1.RunFunctionRequest, opts ...grpc.CallOption) (*fnv1.RunFunctionResponse, error) { + rsp, err := fnv1.NewFunctionRunnerServiceClient(c.cc).RunFunction(ctx, req, opts...) + + // If the v1 RPC worked, just return the response. + if err == nil { + return rsp, nil + } + + // If we hit an error other than Unimplemented, return it. + if status.Code(err) != codes.Unimplemented { + return nil, err + } + + // The v1 RPC is unimplemented. Try the v1beta1 equivalent. The messages + // should be identical in Go and on the wire. + breq, err := toBeta(req) + if err != nil { + return nil, err + } + brsp, err := fnv1beta1.NewFunctionRunnerServiceClient(c.cc).RunFunction(ctx, breq, opts...) + if err != nil { + return nil, err + } + + rsp, err = fromBeta(brsp) + return rsp, err +} + +func toBeta(req *fnv1.RunFunctionRequest) (*fnv1beta1.RunFunctionRequest, error) { + out := &fnv1beta1.RunFunctionRequest{} + b, err := proto.Marshal(req) + if err != nil { + return nil, errors.Wrapf(err, "cannot marshal %T to protobuf bytes", req) + } + err = proto.Unmarshal(b, out) + return out, errors.Wrapf(err, "cannot unmarshal %T protobuf bytes into %T", req, out) +} + +func fromBeta(rsp *fnv1beta1.RunFunctionResponse) (*fnv1.RunFunctionResponse, error) { + out := &fnv1.RunFunctionResponse{} + b, err := proto.Marshal(rsp) + if err != nil { + return nil, errors.Wrapf(err, "cannot marshal %T to protobuf bytes", rsp) + } + err = proto.Unmarshal(b, out) + return out, errors.Wrapf(err, "cannot unmarshal %T protobuf bytes into %T", rsp, out) +} diff --git a/internal/xfn/function_runner_test.go b/internal/xfn/function_runner_test.go index 7fc4e95ef..a4473bcb7 100644 --- a/internal/xfn/function_runner_test.go +++ b/internal/xfn/function_runner_test.go @@ -32,9 +32,12 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" + fnv1beta1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" ) +var _ fnv1.FunctionRunnerServiceClient = &BetaFallBackFunctionRunnerServiceClient{} + func TestRunFunction(t *testing.T) { errBoom := errors.New("boom") @@ -180,6 +183,53 @@ func TestRunFunction(t *testing.T) { }, }, }, + "SuccessfulFallbackToBeta": { + reason: "We should create a new client connection and successfully make a v1beta1 request if the server doesn't yet implement v1", + params: params{ + c: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + // Start a gRPC server. + lis := NewBetaGRPCServer(t, &MockBetaFunctionServer{rsp: &fnv1beta1.RunFunctionResponse{ + Meta: &fnv1beta1.ResponseMeta{Tag: "hi!"}, + }}) + listeners = append(listeners, lis) + + l, ok := obj.(*pkgv1.FunctionRevisionList) + if !ok { + // If we're called to list Functions we want to + // return none, to make sure we GC everything. 
+ return nil + } + l.Items = []pkgv1.FunctionRevision{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "cool-fn-revision-a", + }, + Spec: pkgv1.FunctionRevisionSpec{ + PackageRevisionSpec: pkgv1.PackageRevisionSpec{ + DesiredState: pkgv1.PackageRevisionActive, + }, + }, + Status: pkgv1.FunctionRevisionStatus{ + Endpoint: strings.Replace(lis.Addr().String(), "127.0.0.1", "dns:///localhost", 1), + }, + }, + } + return nil + }), + }, + }, + args: args{ + ctx: context.Background(), + name: "cool-fn", + req: &fnv1.RunFunctionRequest{}, + }, + want: want{ + rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, + }, + }, + }, } for name, tc := range cases { @@ -393,6 +443,27 @@ func NewGRPCServer(t *testing.T, ss fnv1.FunctionRunnerServiceServer) net.Listen return lis } +func NewBetaGRPCServer(t *testing.T, ss fnv1beta1.FunctionRunnerServiceServer) net.Listener { + t.Helper() + + // Listen on a random port. + lis, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + t.Logf("Listening for gRPC connections on %q", lis.Addr().String()) + + // TODO(negz): Is it worth using a WaitGroup for these? + go func() { + s := grpc.NewServer() + fnv1beta1.RegisterFunctionRunnerServiceServer(s, ss) + _ = s.Serve(lis) + }() + + // The caller must close this listener to terminate the server. + return lis +} + type MockFunctionServer struct { fnv1.UnimplementedFunctionRunnerServiceServer @@ -403,3 +474,14 @@ type MockFunctionServer struct { func (s *MockFunctionServer) RunFunction(context.Context, *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { return s.rsp, s.err } + +type MockBetaFunctionServer struct { + fnv1beta1.UnimplementedFunctionRunnerServiceServer + + rsp *fnv1beta1.RunFunctionResponse + err error +} + +func (s *MockBetaFunctionServer) RunFunction(context.Context, *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) { + return s.rsp, s.err +} From 31653fe60495cf3c843a0e72ad2647c0fa2318cf Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 15 Aug 2024 19:23:57 -0700 Subject: [PATCH 343/370] Fix typo when hidden --enable-composition-revisions flag is passed Signed-off-by: Nic Cope --- cmd/crossplane/core/core.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crossplane/core/core.go b/cmd/crossplane/core/core.go index af285bbea..c3c0a2670 100644 --- a/cmd/crossplane/core/core.go +++ b/cmd/crossplane/core/core.go @@ -200,7 +200,7 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli } if !c.EnableCompositionRevisions { - log.Info("Composition Revisions are is GA and cannot be disabled. The --enable-composition-revisions flag will be removed in a future release.") + log.Info("Composition Revisions are GA and cannot be disabled. The --enable-composition-revisions flag will be removed in a future release.") } if !c.EnableCompositionFunctions { log.Info("Composition Functions are GA and cannot be disabled. The --enable-composition-functions flag will be removed in a future release.") From f5f73edd3b432f105bad765a61777ef023447bef Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 15 Aug 2024 19:26:26 -0700 Subject: [PATCH 344/370] Register the init command under crossplane xpkg I missed this when moving it out from crossplane beta xpkg. 
Signed-off-by: Nic Cope --- cmd/crank/xpkg/xpkg.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/crank/xpkg/xpkg.go b/cmd/crank/xpkg/xpkg.go index 82c9058af..f996600ee 100644 --- a/cmd/crank/xpkg/xpkg.go +++ b/cmd/crank/xpkg/xpkg.go @@ -23,6 +23,7 @@ package xpkg type Cmd struct { // Keep subcommands sorted alphabetically. Build buildCmd `cmd:"" help:"Build a new package."` + Init initCmd `cmd:"" help:"Initialize a new package from a template."` Install installCmd `cmd:"" help:"Install a package in a control plane."` Login loginCmd `cmd:"" help:"Login to the default package registry."` Logout logoutCmd `cmd:"" help:"Logout of the default package registry."` From a1f67528a7606ee51a08af04832acec66899aeaa Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Fri, 16 Aug 2024 12:58:40 -0700 Subject: [PATCH 345/370] Include gRPC method in metrics This method includes the version (v1 or v1beta1). Including it in the metrics will help folks identify whether their control planes are running any functions that are still speaking only v1beta1. Signed-off-by: Nic Cope --- internal/xfn/function_runner_metrics.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/xfn/function_runner_metrics.go b/internal/xfn/function_runner_metrics.go index d3d0b91a7..8b6bf1adf 100644 --- a/internal/xfn/function_runner_metrics.go +++ b/internal/xfn/function_runner_metrics.go @@ -41,20 +41,20 @@ func NewMetrics() *Metrics { Subsystem: "composition", Name: "run_function_request_total", Help: "Total number of RunFunctionRequests sent.", - }, []string{"function_name", "function_package", "grpc_target"}), + }, []string{"function_name", "function_package", "grpc_target", "grpc_method"}), responses: prometheus.NewCounterVec(prometheus.CounterOpts{ Subsystem: "composition", Name: "run_function_response_total", Help: "Total number of RunFunctionResponses received.", - }, []string{"function_name", "function_package", "grpc_target", "grpc_code", "result_severity"}), + }, []string{"function_name", "function_package", "grpc_target", "grpc_method", "grpc_code", "result_severity"}), duration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ Subsystem: "composition", Name: "run_function_seconds", Help: "Histogram of RunFunctionResponse latency (seconds).", Buckets: prometheus.DefBuckets, - }, []string{"function_name", "function_package", "grpc_target", "grpc_code", "result_severity"}), + }, []string{"function_name", "function_package", "grpc_target", "grpc_method", "grpc_code", "result_severity"}), } } @@ -80,7 +80,7 @@ func (m *Metrics) Collect(ch chan<- prometheus.Metric) { // function. The supplied package (pkg) should be the package's OCI reference. func (m *Metrics) CreateInterceptor(name, pkg string) grpc.UnaryClientInterceptor { return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - l := prometheus.Labels{"function_name": name, "function_package": pkg, "grpc_target": cc.Target()} + l := prometheus.Labels{"function_name": name, "function_package": pkg, "grpc_target": cc.Target(), "grpc_method": method} m.requests.With(l).Inc() From 332d42b38803604e9e6823ab94ec9d0a9359e720 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Fri, 16 Aug 2024 12:59:40 -0700 Subject: [PATCH 346/370] Remove comment about E2E testing beta features It's still generally relevant, but it was at the functions test which isn't beta anymore. 
Signed-off-by: Nic Cope --- test/e2e/apiextensions_test.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/test/e2e/apiextensions_test.go b/test/e2e/apiextensions_test.go index 743e681b6..faeb3f27c 100644 --- a/test/e2e/apiextensions_test.go +++ b/test/e2e/apiextensions_test.go @@ -221,10 +221,6 @@ func TestCompositionRealtimeRevisionSelection(t *testing.T) { ) } -// TODO(negz): How do we want to handle beta features? They're on by default. -// Maybe in this case add a test suite that tests P&T when Functions are -// disabled? - func TestCompositionFunctions(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/functions" environment.Test(t, From 43b12269b957f81bd2f0ed11799a568f94ce3918 Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Thu, 15 Aug 2024 17:08:31 -0700 Subject: [PATCH 347/370] README: clarify public roadmap change process Signed-off-by: Jared Watts --- README.md | 12 ++++++++++++ ROADMAP.md | 11 +++++++++++ 2 files changed, 23 insertions(+) diff --git a/README.md b/README.md index ee6292fa7..e2c1ad83a 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,16 @@ The public roadmap for Crossplane is published as a GitHub project board. Issues added to the roadmap have been triaged and identified as valuable to the community, and therefore a priority for the project that we expect to invest in. +The maintainer team regularly triages requests from the community to identify +features and issues of suitable scope and impact to include in this roadmap. The +community is encouraged to show their support for potential roadmap issues by +adding a :+1: reaction, leaving descriptive comments, and attending the +[regular community meetings] to discuss their requirements and use cases. + +The maintainer team updates the roadmap on an as needed basis, in response to +demand, priority, and available resources. The public roadmap can be updated at +any time. + Milestones assigned to any issues in the roadmap are intended to give a sense of overall priority and the expected order of delivery. They should be considered approximate estimations and are **not** a strict commitment to a specific @@ -76,6 +86,7 @@ encouraged to join. * [Community Calendar][community calendar] ### Special Interest Groups (SIG) + Each SIG collaborates in Slack and some groups have regular meetings, you can find the meetings in the [Community Calendar][community calendar]. - [#sig-composition-environments][sig-composition-environments-slack] @@ -124,6 +135,7 @@ Crossplane is under the Apache 2.0 license. [community calendar]: https://calendar.google.com/calendar/embed?src=c_2cdn0hs9e2m05rrv1233cjoj1k%40group.calendar.google.com [releases]: https://github.com/crossplane/crossplane/releases [ADOPTERS.md]: ADOPTERS.md +[regular community meetings]: https://github.com/crossplane/crossplane/blob/master/README.md#get-involved [Crossplane Roadmap]: https://github.com/orgs/crossplane/projects/20/views/9?pane=info [sig-composition-environments-slack]: https://crossplane.slack.com/archives/C05BP6QFLUW [sig-composition-functions-slack]: https://crossplane.slack.com/archives/C031Y29CSAE diff --git a/ROADMAP.md b/ROADMAP.md index 970bd371d..56452b295 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -4,6 +4,16 @@ The public roadmap for Crossplane is published as a GitHub project board. Issues added to the roadmap have been triaged and identified as valuable to the community, and therefore a priority for the project that we expect to invest in. 
+The maintainer team regularly triages requests from the community to identify +features and issues of suitable scope and impact to include in this roadmap. The +community is encouraged to show their support for potential roadmap issues by +adding a :+1: reaction, leaving descriptive comments, and attending the +[regular community meetings] to discuss their requirements and use cases. + +The maintainer team updates the roadmap on an as needed basis, in response to +demand, priority, and available resources. The public roadmap can be updated at +any time. + Milestones assigned to any issues in the roadmap are intended to give a sense of overall priority and the expected order of delivery. They should be considered approximate estimations and are **not** a strict commitment to a specific @@ -12,3 +22,4 @@ delivery timeline. [Crossplane Roadmap] [Crossplane Roadmap]: https://github.com/orgs/crossplane/projects/20/views/9?pane=info +[regular community meetings]: https://github.com/crossplane/crossplane/blob/master/README.md#get-involved From e86d29240b7cbb38c1cdbf4eec0d34beea5ba88a Mon Sep 17 00:00:00 2001 From: Jared Watts Date: Thu, 15 Aug 2024 18:08:42 -0700 Subject: [PATCH 348/370] security: update links to release cycle in security policy Signed-off-by: Jared Watts --- SECURITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SECURITY.md b/SECURITY.md index 3265e9322..7663515d6 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -85,7 +85,7 @@ and mentioned in the fixed versions' release notes. ## Supported Versions -See [Crossplane's documentation](https://docs.crossplane.io/master/reference/release-cycle/) +See [Crossplane's documentation](https://docs.crossplane.io/latest/learn/release-cycle/) for information on supported versions of crossplane. Any supported release branch may receive security updates. For any security issues discovered on older versions, non-core packages, or dependencies, please inform maintainers From 9eca638e6aa6c2bc92ad00dce284c6fc3f20b7fe Mon Sep 17 00:00:00 2001 From: Jonathan Oddy Date: Sun, 18 Aug 2024 21:25:58 +0100 Subject: [PATCH 349/370] Fix race condition creating certs in init. Using Update instead of CreateOrUpdate avoids a race by failing with a conflict if the underlying Secret changed between the call to Get and the call to Update. 
Signed-off-by: Jonathan Oddy --- internal/initializer/tls.go | 65 +++++++++++++++++++++---------------- 1 file changed, 37 insertions(+), 28 deletions(-) diff --git a/internal/initializer/tls.go b/internal/initializer/tls.go index 04330c01f..24c387dd5 100644 --- a/internal/initializer/tls.go +++ b/internal/initializer/tls.go @@ -26,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -128,7 +127,9 @@ func (e *TLSCertificateGenerator) loadOrGenerateCA(ctx context.Context, kube cli return nil, errors.Wrapf(err, errFmtGetTLSSecret, nn.Name) } + create := true if err == nil { + create = false kd := caSecret.Data[corev1.TLSPrivateKeyKey] cd := caSecret.Data[corev1.TLSCertKey] if len(kd) != 0 && len(cd) != 0 { @@ -157,13 +158,15 @@ func (e *TLSCertificateGenerator) loadOrGenerateCA(ctx context.Context, kube cli caSecret.Name = nn.Name caSecret.Namespace = nn.Namespace - _, err = controllerruntime.CreateOrUpdate(ctx, kube, caSecret, func() error { - caSecret.Data = map[string][]byte{ - corev1.TLSCertKey: caCrtByte, - corev1.TLSPrivateKeyKey: caKeyByte, - } - return nil - }) + caSecret.Data = map[string][]byte{ + corev1.TLSCertKey: caCrtByte, + corev1.TLSPrivateKeyKey: caKeyByte, + } + if create { + err = kube.Create(ctx, caSecret) + } else { + err = kube.Update(ctx, caSecret) + } if err != nil { return nil, errors.Wrapf(err, errFmtCannotCreateOrUpdate, nn.Name) } @@ -179,7 +182,9 @@ func (e *TLSCertificateGenerator) ensureClientCertificate(ctx context.Context, k return errors.Wrapf(err, errFmtGetTLSSecret, nn.Name) } + create := true if err == nil { + create = false if len(sec.Data[corev1.TLSPrivateKeyKey]) != 0 || len(sec.Data[corev1.TLSCertKey]) != 0 || len(sec.Data[SecretKeyCACert]) != 0 { e.log.Info("TLS secret contains client certificate.", "secret", nn.Name) return nil @@ -212,17 +217,18 @@ func (e *TLSCertificateGenerator) ensureClientCertificate(ctx context.Context, k if e.owner != nil { sec.OwnerReferences = e.owner } - _, err = controllerruntime.CreateOrUpdate(ctx, kube, sec, func() error { - if sec.Data == nil { - sec.Data = make(map[string][]byte) - } - sec.Data[corev1.TLSCertKey] = certData - sec.Data[corev1.TLSPrivateKeyKey] = keyData - sec.Data[SecretKeyCACert] = signer.certificatePEM - - return nil - }) + if sec.Data == nil { + sec.Data = make(map[string][]byte) + } + sec.Data[corev1.TLSCertKey] = certData + sec.Data[corev1.TLSPrivateKeyKey] = keyData + sec.Data[SecretKeyCACert] = signer.certificatePEM + if create { + err = kube.Create(ctx, sec) + } else { + err = kube.Update(ctx, sec) + } return errors.Wrapf(err, errFmtCannotCreateOrUpdate, nn.Name) } @@ -234,7 +240,9 @@ func (e *TLSCertificateGenerator) ensureServerCertificate(ctx context.Context, k return errors.Wrapf(err, errFmtGetTLSSecret, nn.Name) } + create := true if err == nil { + create = false if len(sec.Data[corev1.TLSCertKey]) != 0 || len(sec.Data[corev1.TLSPrivateKeyKey]) != 0 || len(sec.Data[SecretKeyCACert]) != 0 { e.log.Info("TLS secret contains server certificate.", "secret", nn.Name) return nil @@ -268,17 +276,18 @@ func (e *TLSCertificateGenerator) ensureServerCertificate(ctx context.Context, k if e.owner != nil { sec.OwnerReferences = e.owner } - _, err = controllerruntime.CreateOrUpdate(ctx, kube, sec, func() error { - if sec.Data == nil { - sec.Data = make(map[string][]byte) - } - 
sec.Data[corev1.TLSCertKey] = certData - sec.Data[corev1.TLSPrivateKeyKey] = keyData - sec.Data[SecretKeyCACert] = signer.certificatePEM - - return nil - }) + if sec.Data == nil { + sec.Data = make(map[string][]byte) + } + sec.Data[corev1.TLSCertKey] = certData + sec.Data[corev1.TLSPrivateKeyKey] = keyData + sec.Data[SecretKeyCACert] = signer.certificatePEM + if create { + err = kube.Create(ctx, sec) + } else { + err = kube.Update(ctx, sec) + } return errors.Wrapf(err, errFmtCannotCreateOrUpdate, nn.Name) } From 446d9d0da6620b1e35d54ee5ca54375ccfb7d4a5 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Tue, 20 Aug 2024 09:32:43 +0200 Subject: [PATCH 350/370] chore(deps): bump crossplane-runtime to v1.18.0-rc.0 Signed-off-by: Philippe Scorsolini --- .../fn/proto/v1/run_function.pb.go | 46 +++++++++---------- .../v1beta1/zz_generated_run_function.pb.go | 46 +++++++++---------- go.mod | 4 +- go.sum | 8 ++-- 4 files changed, 52 insertions(+), 52 deletions(-) diff --git a/apis/apiextensions/fn/proto/v1/run_function.pb.go b/apis/apiextensions/fn/proto/v1/run_function.pb.go index 5b7f99bc0..f7080304c 100644 --- a/apis/apiextensions/fn/proto/v1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1/run_function.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.2 // protoc (unknown) // source: apiextensions/fn/proto/v1/run_function.proto @@ -1560,7 +1560,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP() []byte { var file_apiextensions_fn_proto_v1_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 4) var file_apiextensions_fn_proto_v1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 21) -var file_apiextensions_fn_proto_v1_run_function_proto_goTypes = []interface{}{ +var file_apiextensions_fn_proto_v1_run_function_proto_goTypes = []any{ (Ready)(0), // 0: apiextensions.fn.proto.v1.Ready (Severity)(0), // 1: apiextensions.fn.proto.v1.Severity (Target)(0), // 2: apiextensions.fn.proto.v1.Target @@ -1638,7 +1638,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*RunFunctionRequest); i { case 0: return &v.state @@ -1650,7 +1650,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Credentials); i { case 0: return &v.state @@ -1662,7 +1662,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*CredentialData); i { case 0: return &v.state @@ -1674,7 +1674,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Resources); i 
{ case 0: return &v.state @@ -1686,7 +1686,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*RunFunctionResponse); i { case 0: return &v.state @@ -1698,7 +1698,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*RequestMeta); i { case 0: return &v.state @@ -1710,7 +1710,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*Requirements); i { case 0: return &v.state @@ -1722,7 +1722,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ResourceSelector); i { case 0: return &v.state @@ -1734,7 +1734,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*MatchLabels); i { case 0: return &v.state @@ -1746,7 +1746,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ResponseMeta); i { case 0: return &v.state @@ -1758,7 +1758,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*State); i { case 0: return &v.state @@ -1770,7 +1770,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*Resource); i { case 0: return &v.state @@ -1782,7 +1782,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*Result); i { case 0: return &v.state @@ -1794,7 +1794,7 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { return nil } } - 
file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*Condition); i { case 0: return &v.state @@ -1807,18 +1807,18 @@ func file_apiextensions_fn_proto_v1_run_function_proto_init() { } } } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1].OneofWrappers = []any{ (*Credentials_CredentialData)(nil), } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7].OneofWrappers = []interface{}{ + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7].OneofWrappers = []any{ (*ResourceSelector_MatchName)(nil), (*ResourceSelector_MatchLabels)(nil), } - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.pb.go index d7a926f03..3fe1dd60d 100644 --- a/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.2 // protoc (unknown) // source: apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto @@ -1573,7 +1573,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDesc var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 4) var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 21) -var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_goTypes = []interface{}{ +var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_goTypes = []any{ (Ready)(0), // 0: apiextensions.fn.proto.v1beta1.Ready (Severity)(0), // 1: apiextensions.fn.proto.v1beta1.Severity (Target)(0), // 2: apiextensions.fn.proto.v1beta1.Target @@ -1651,7 +1651,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return } if !protoimpl.UnsafeEnabled { - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*RunFunctionRequest); i { case 0: return &v.state @@ -1663,7 +1663,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Credentials); i { case 0: return &v.state @@ -1675,7 +1675,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*CredentialData); i { case 0: return &v.state @@ -1687,7 +1687,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Resources); i { case 0: return &v.state @@ -1699,7 +1699,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*RunFunctionResponse); i { case 0: return &v.state @@ -1711,7 +1711,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*RequestMeta); i { case 0: return &v.state @@ -1723,7 +1723,7 @@ func 
file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*Requirements); i { case 0: return &v.state @@ -1735,7 +1735,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ResourceSelector); i { case 0: return &v.state @@ -1747,7 +1747,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*MatchLabels); i { case 0: return &v.state @@ -1759,7 +1759,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ResponseMeta); i { case 0: return &v.state @@ -1771,7 +1771,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*State); i { case 0: return &v.state @@ -1783,7 +1783,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*Resource); i { case 0: return &v.state @@ -1795,7 +1795,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*Result); i { case 0: return &v.state @@ -1807,7 +1807,7 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() return nil } } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*Condition); i { case 0: return &v.state @@ -1820,18 +1820,18 @@ func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() } } } - 
file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1].OneofWrappers = []any{ (*Credentials_CredentialData)(nil), } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7].OneofWrappers = []interface{}{ + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7].OneofWrappers = []any{ (*ResourceSelector_MatchName)(nil), (*ResourceSelector_MatchLabels)(nil), } - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/go.mod b/go.mod index 54d37eff4..11a68403a 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 github.com/Masterminds/semver v1.5.0 github.com/alecthomas/kong v0.9.0 - github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240628014613-063a0273907b + github.com/crossplane/crossplane-runtime v1.18.0-rc.0 github.com/docker/docker v25.0.6+incompatible github.com/docker/go-connections v0.5.0 github.com/emicklei/dot v1.6.2 @@ -27,7 +27,7 @@ require ( golang.org/x/sync v0.7.0 google.golang.org/grpc v1.63.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.2 k8s.io/api v0.30.0 k8s.io/apiextensions-apiserver v0.30.0 k8s.io/apimachinery v0.30.0 diff --git a/go.sum b/go.sum index 8996d82aa..5dbee703d 100644 --- a/go.sum +++ b/go.sum @@ -125,8 +125,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240628014613-063a0273907b h1:XNYG9Px6WsZ8OvfZ/hPDtnglK2jAmmkox1/JUZjXE9I= -github.com/crossplane/crossplane-runtime v1.17.0-rc.0.0.20240628014613-063a0273907b/go.mod h1:saPoKGl3PfzzL8Q6PH+/cIjD0ssrmHW/gmiqstWy+0Q= +github.com/crossplane/crossplane-runtime v1.18.0-rc.0 h1:1QoWF8LSsaJ7ff+vt7NhjrBtSpIHxISSsXCtkndu7/A= +github.com/crossplane/crossplane-runtime v1.18.0-rc.0/go.mod h1:vtglCrnnbq2HurAk9yLHa4qS0bbnCxaKL7C21cQcB/0= github.com/cyphar/filepath-securejoin v0.2.4 
h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= @@ -612,8 +612,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From ae42fd7a71d0391a071c725b16c8eda2552e49a7 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Tue, 20 Aug 2024 10:38:06 +0200 Subject: [PATCH 351/370] chore: add 1.17 release branch to renovate Signed-off-by: Philippe Scorsolini --- .github/renovate.json5 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 7f592e9ef..dff4a4928 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -14,9 +14,9 @@ // PLEASE UPDATE THIS WHEN RELEASING. 
"baseBranches": [ "master", - "release-1.14", "release-1.15", - "release-1.16" + "release-1.16", + "release-1.17" ], "ignorePaths": [ "design/**", From 3f906fbfbd9dff991649319629267608871e7c9c Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Tue, 20 Aug 2024 12:41:46 +0200 Subject: [PATCH 352/370] ci: docker login to right registry when promoting Signed-off-by: Philippe Scorsolini --- Earthfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Earthfile b/Earthfile index 6ca83ac2b..6532253fd 100644 --- a/Earthfile +++ b/Earthfile @@ -389,7 +389,7 @@ ci-promote-image: ARG --required CHANNEL FROM alpine:3.20 RUN apk add docker - RUN --secret DOCKER_USER --secret DOCKER_PASSWORD docker login -u ${DOCKER_USER} -p ${DOCKER_PASSWORD} + RUN --secret DOCKER_USER --secret DOCKER_PASSWORD docker login -u ${DOCKER_USER} -p ${DOCKER_PASSWORD} ${CROSSPLANE_REPO} RUN --push docker buildx imagetools create \ --tag ${CROSSPLANE_REPO}:${CHANNEL} \ --tag ${CROSSPLANE_REPO}:${CROSSPLANE_VERSION}-${CHANNEL} \ From d46a1c5dd4f08634624fa6141f4c705a1303e3a9 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Tue, 20 Aug 2024 15:13:00 +0200 Subject: [PATCH 353/370] ci: --push on promote Signed-off-by: Philippe Scorsolini --- .github/workflows/promote.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index 143d6e217..ce69e8393 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -47,6 +47,7 @@ jobs: if: env.DOCKER_USR != '' run: | earthly --strict \ + --push \ --secret DOCKER_USER=${{ secrets.DOCKER_USR }} \ --secret DOCKER_PASSWORD=${{ secrets.DOCKER_PSW }} \ +ci-promote-image --CHANNEL=${{ inputs.channel }} --CROSSPLANE_VERSION=${{ inputs.version }} --CROSSPLANE_REPO=docker.io/crossplane/crossplane @@ -55,6 +56,7 @@ jobs: if: env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' run: | earthly --strict \ + --push \ --secret DOCKER_USER=${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} \ --secret DOCKER_PASSWORD=${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_PSW }} \ +ci-promote-image --CHANNEL=${{ inputs.channel }} --CROSSPLANE_VERSION=${{ inputs.version }} --CROSSPLANE_REPO=xpkg.upbound.io/crossplane/crossplane @@ -63,6 +65,7 @@ jobs: if: env.AWS_USR != '' run: | earthly --strict \ + --push \ --secret=AWS_ACCESS_KEY_ID=${{ secrets.AWS_USR }} \ --secret=AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_PSW }} \ +ci-promote-build-artifacts --AWS_DEFAULT_REGION=us-east-1 --CHANNEL=${{ inputs.channel }} --BUILD_DIR=${GITHUB_REF##*/} --PRERELEASE=${{ inputs.pre-release }} --CROSSPLANE_VERSION=${{ inputs.version }} From 265dc449ccf2dc36d44e5f13ef42de2e210b15fa Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 20 Aug 2024 12:51:43 -0700 Subject: [PATCH 354/370] Omit docker login registry arg when promoting tag in Docker Hub Apparently you get a magic URL in your Docker config file when you omit the registry. It seems to be needed to successfully push. Signed-off-by: Nic Cope --- Earthfile | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Earthfile b/Earthfile index 6532253fd..06f0a6417 100644 --- a/Earthfile +++ b/Earthfile @@ -389,7 +389,13 @@ ci-promote-image: ARG --required CHANNEL FROM alpine:3.20 RUN apk add docker - RUN --secret DOCKER_USER --secret DOCKER_PASSWORD docker login -u ${DOCKER_USER} -p ${DOCKER_PASSWORD} ${CROSSPLANE_REPO} + # We need to omit the registry argument when we're logging into Docker Hub. + # Otherwise login will appear to succeed, but buildx will fail on auth. 
+ IF [[ "${CROSSPLANE_REPO}" == *docker.io/* ]] + RUN --secret DOCKER_USER --secret DOCKER_PASSWORD docker login -u ${DOCKER_USER} -p ${DOCKER_PASSWORD} + ELSE + RUN --secret DOCKER_USER --secret DOCKER_PASSWORD docker login -u ${DOCKER_USER} -p ${DOCKER_PASSWORD} ${CROSSPLANE_REPO} + END RUN --push docker buildx imagetools create \ --tag ${CROSSPLANE_REPO}:${CHANNEL} \ --tag ${CROSSPLANE_REPO}:${CROSSPLANE_VERSION}-${CHANNEL} \ From f55ce852e116c6cba920340f9fba8c6b7cae3273 Mon Sep 17 00:00:00 2001 From: Christian Franco <30784921+vibe@users.noreply.github.com> Date: Wed, 21 Aug 2024 00:59:09 +0000 Subject: [PATCH 355/370] fix: ensure x-kubernetes-preserve-unknown-fields is copied from xrd to crd spec Signed-off-by: Christian Franco <30784921+vibe@users.noreply.github.com> --- internal/xcrd/crd.go | 1 + internal/xcrd/crd_test.go | 303 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 304 insertions(+) diff --git a/internal/xcrd/crd.go b/internal/xcrd/crd.go index 085050083..e7336464e 100644 --- a/internal/xcrd/crd.go +++ b/internal/xcrd/crd.go @@ -188,6 +188,7 @@ func genCrdVersion(vr v1.CompositeResourceDefinitionVersion, maxNameLength int64 xSpec := s.Properties["spec"] cSpec := crdv.Schema.OpenAPIV3Schema.Properties["spec"] cSpec.Required = append(cSpec.Required, xSpec.Required...) + cSpec.XPreserveUnknownFields = xSpec.XPreserveUnknownFields cSpec.XValidations = append(cSpec.XValidations, xSpec.XValidations...) cSpec.OneOf = append(cSpec.OneOf, xSpec.OneOf...) cSpec.Description = xSpec.Description diff --git a/internal/xcrd/crd_test.go b/internal/xcrd/crd_test.go index 60b6dd549..f14c9f0fe 100644 --- a/internal/xcrd/crd_test.go +++ b/internal/xcrd/crd_test.go @@ -1707,6 +1707,309 @@ func TestForCompositeResource(t *testing.T) { c: nil, }, }, + "PreserveUnknownFieldsInSpec": { + reason: "A CRD should set PreserveUnknownFields based on the XRD PreserveUnknownFields.", + args: args{ + v: &v1.CompositeResourceValidation{ + OpenAPIV3Schema: runtime.RawExtension{Raw: []byte(strings.Replace(schema, `"spec": {`, `"spec": { "x-kubernetes-preserve-unknown-fields": true,`, 1))}, + }, + }, + want: want{ + c: &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ + meta.AsController(meta.TypedReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind)), + }, + }, + Spec: extv1.CustomResourceDefinitionSpec{ + Group: group, + Names: extv1.CustomResourceDefinitionNames{ + Plural: plural, + Singular: singular, + Kind: kind, + ListKind: listKind, + Categories: []string{CategoryComposite}, + }, + Scope: extv1.ClusterScoped, + Versions: []extv1.CustomResourceDefinitionVersion{{ + Name: version, + Served: true, + Storage: true, + Subresources: &extv1.CustomResourceSubresources{ + Status: &extv1.CustomResourceSubresourceStatus{}, + }, + AdditionalPrinterColumns: []extv1.CustomResourceColumnDefinition{ + { + Name: "SYNCED", + Type: "string", + JSONPath: ".status.conditions[?(@.type=='Synced')].status", + }, + { + Name: "READY", + Type: "string", + JSONPath: ".status.conditions[?(@.type=='Ready')].status", + }, + { + Name: "COMPOSITION", + Type: "string", + JSONPath: ".spec.compositionRef.name", + }, + { + Name: "AGE", + Type: "date", + JSONPath: ".metadata.creationTimestamp", + }, + }, + Schema: &extv1.CustomResourceValidation{ + OpenAPIV3Schema: &extv1.JSONSchemaProps{ + Type: "object", + Description: "What the resource is for.", + Required: []string{"spec"}, + Properties: map[string]extv1.JSONSchemaProps{ + 
"apiVersion": { + Type: "string", + }, + "kind": { + Type: "string", + }, + "metadata": { + // NOTE(muvaf): api-server takes care of validating + // metadata. + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "name": { + Type: "string", + MaxLength: ptr.To[int64](63), + }, + }, + }, + "spec": { + Type: "object", + Required: []string{"storageGB", "engineVersion"}, + Description: "Specification of the resource.", + XPreserveUnknownFields: ptr.To(true), + Properties: map[string]extv1.JSONSchemaProps{ + // From CRDSpecTemplate.Validation + "storageGB": {Type: "integer", Description: "Pretend this is useful."}, + "engineVersion": { + Type: "string", + Enum: []extv1.JSON{ + {Raw: []byte(`"5.6"`)}, + {Raw: []byte(`"5.7"`)}, + }, + }, + "someField": {Type: "string", Description: "Pretend this is useful."}, + "someOtherField": {Type: "string", Description: "Pretend this is useful."}, + + // From CompositeResourceSpecProps() + "compositionRef": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + }, + }, + "compositionSelector": { + Type: "object", + Required: []string{"matchLabels"}, + Properties: map[string]extv1.JSONSchemaProps{ + "matchLabels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + }, + }, + "compositionRevisionRef": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + }, + }, + "compositionRevisionSelector": { + Type: "object", + Required: []string{"matchLabels"}, + Properties: map[string]extv1.JSONSchemaProps{ + "matchLabels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + }, + }, + "compositionUpdatePolicy": { + Type: "string", + Enum: []extv1.JSON{ + {Raw: []byte(`"Automatic"`)}, + {Raw: []byte(`"Manual"`)}, + }, + }, + "claimRef": { + Type: "object", + Required: []string{"apiVersion", "kind", "namespace", "name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": {Type: "string"}, + "kind": {Type: "string"}, + "namespace": {Type: "string"}, + "name": {Type: "string"}, + }, + }, + "environmentConfigRefs": { + Type: "array", + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": {Type: "string"}, + "name": {Type: "string"}, + "kind": {Type: "string"}, + }, + Required: []string{"apiVersion", "kind"}, + }, + }, + }, + "resourceRefs": { + Type: "array", + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": {Type: "string"}, + "name": {Type: "string"}, + "kind": {Type: "string"}, + }, + Required: []string{"apiVersion", "kind"}, + }, + }, + XListType: ptr.To("atomic"), + }, + "publishConnectionDetailsTo": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + "configRef": { + Type: "object", + Default: &extv1.JSON{Raw: []byte(`{"name": "default"}`)}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": { + Type: "string", + }, + }, + }, + "metadata": { + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "labels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: 
&extv1.JSONSchemaProps{Type: "string"}, + }, + }, + "annotations": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + "type": { + Type: "string", + }, + }, + }, + }, + }, + "writeConnectionSecretToRef": { + Type: "object", + Required: []string{"name", "namespace"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + "namespace": {Type: "string"}, + }, + }, + }, + XValidations: extv1.ValidationRules{ + { + Message: "Cannot change engine version", + Rule: "self.engineVersion == oldSelf.engineVersion", + }, + }, + OneOf: []extv1.JSONSchemaProps{ + {Required: []string{"someField"}}, + {Required: []string{"someOtherField"}}, + }, + }, + "status": { + Type: "object", + Description: "Status of the resource.", + Properties: map[string]extv1.JSONSchemaProps{ + "phase": {Type: "string"}, + "something": {Type: "string"}, + + // From CompositeResourceStatusProps() + "conditions": { + Description: "Conditions of the resource.", + Type: "array", + XListType: ptr.To("map"), + XListMapKeys: []string{"type"}, + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "object", + Required: []string{"lastTransitionTime", "reason", "status", "type"}, + Properties: map[string]extv1.JSONSchemaProps{ + "lastTransitionTime": {Type: "string", Format: "date-time"}, + "message": {Type: "string"}, + "reason": {Type: "string"}, + "status": {Type: "string"}, + "type": {Type: "string"}, + }, + }, + }, + }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, + "connectionDetails": { + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "lastPublishedTime": {Type: "string", Format: "date-time"}, + }, + }, + }, + XValidations: extv1.ValidationRules{ + { + Message: "Phase is required once set", + Rule: "!has(oldSelf.phase) || has(self.phase)", + }, + }, + OneOf: []extv1.JSONSchemaProps{ + {Required: []string{"phase"}}, + {Required: []string{"something"}}, + }, + }, + }, + }, + }, + }}, + }, + }, + }, + }, } for name, tc := range cases { From 67b3c85fe4c9deca8c3aac785ed245e2c2f621a2 Mon Sep 17 00:00:00 2001 From: Caio Almeida Date: Fri, 16 Aug 2024 12:04:39 -0300 Subject: [PATCH 356/370] docs: changing gympass adopters to wellhub company Signed-off-by: Caio Almeida --- ADOPTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index c6b042f90..b338bfee7 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -33,7 +33,7 @@ This list is sorted in the order that organizations were added to it. | [RunWhen](https://runwhen.com) | @stewartshea | Builds production and developer environments that power the RunWhen Social Reliability Platform.| | [Nethopper](https://nethopper.io) | @ddonahuex | Main IaC component in Nethopper's [Cloud Management Platform](https://www.nethopper.io/platform). Nethopper's Cloud Management Platform combines Crossplane with Continuous Delivery to allow DevOps to create, update, and destroy infrastructure in any cloud.| | [Renault](https://www.renaultgroup.com/) | @smileisak | Building Renault Kubernetes Platform resources using XRDs and compositions for an additional layer of abstraction to provide end-user services. | -| [Gympass](https://gympass.com) | @caiofralmeida | Builds a self-service platform so engineers can be more productive in resource provisioning. 
| +| [Wellhub (formerly Gympass)](https://wellhub.com) | @caiofralmeida @LCaparelli | Builds a self-service platform so engineers can be more productive in resource provisioning. | | [Deutsche Kreditbank AG](https://www.dkb.de/) | @akesser | At DKB, we have fully integrated Crossplane into our DKB Standard Operating Platform. Starting to move to production in mid-2023, all our clusters now use Crossplane to manage thousands of resources. | | [Akuity](https://akuity.io) | @wanghong230 | Control plane and infrastructure management solution for [Akuity Platform - Managed Argo CD](https://akuity.io/akuity-platform/). Crossplane manages some infrastructure part of dev, staging, and production environments. | | [Neux](https://neux.io) | @styk-tv | In production, running dynamic Crossplane control plane for auto-adjusting kafka/connect/telegraf payload transformations, filtering to/from sources/destinations. | From bcd938b9e14a86d66b2d03fc498ddfbf93dfab37 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Thu, 29 Aug 2024 11:25:19 +0200 Subject: [PATCH 357/370] chore: drop 1.14 and update 1.17 date from release table Signed-off-by: Philippe Scorsolini --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e2c1ad83a..09a0f4801 100644 --- a/README.md +++ b/README.md @@ -24,12 +24,12 @@ documentation]. | Release | Release Date | EOL | |:-------:|:-------------:|:--------:| -| v1.14 | Nov 1, 2023 | Aug 2024 | | v1.15 | Feb 15, 2024 | Nov 2024 | | v1.16 | May 15, 2024 | Feb 2025 | -| v1.17 | Early Aug '24 | May 2025 | +| v1.17 | Aug 29, 2024 | May 2025 | | v1.18 | Early Nov '24 | Aug 2025 | | v1.19 | Early Feb '25 | Nov 2025 | +| v1.20 | Early May '25 | Feb 2025 | You can subscribe to the [community calendar] to track all release dates, and find the most recent releases on the [releases] page. From 972c24252865ea67117bc10d79276fd65dbc56d8 Mon Sep 17 00:00:00 2001 From: Philippe Scorsolini Date: Fri, 30 Aug 2024 16:52:13 +0200 Subject: [PATCH 358/370] chore: right year for 1.20 EOL Co-authored-by: Jared Watts Signed-off-by: Philippe Scorsolini --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 09a0f4801..6c833616a 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ documentation]. | v1.17 | Aug 29, 2024 | May 2025 | | v1.18 | Early Nov '24 | Aug 2025 | | v1.19 | Early Feb '25 | Nov 2025 | -| v1.20 | Early May '25 | Feb 2025 | +| v1.20 | Early May '25 | Feb 2026 | You can subscribe to the [community calendar] to track all release dates, and find the most recent releases on the [releases] page. From 72175d614f6f12feb936c7d94d4ff06e3eeb8f48 Mon Sep 17 00:00:00 2001 From: Jean du Plessis Date: Tue, 3 Sep 2024 12:54:19 +0200 Subject: [PATCH 359/370] Updates repo links, workflow configurations and other references from master to main. 
Signed-off-by: Jean du Plessis --- .github/PULL_REQUEST_TEMPLATE.md | 4 +-- .github/renovate.json5 | 18 ++++++------- .github/workflows/ci.yml | 26 +++++++++---------- ADOPTERS.md | 2 +- CHARTER.md | 2 +- CODE_OF_CONDUCT.md | 2 +- README.md | 2 +- ROADMAP.md | 2 +- SECURITY.md | 2 +- contributing/README.md | 2 +- .../guide-adding-external-secret-stores.md | 2 +- contributing/guide-adding-fuzz-test-cases.md | 8 +++--- contributing/guide-provider-development.md | 2 +- contributing/specifications/functions.md | 2 +- design/README.md | 2 +- ...sign-doc-composition-validating-webhook.md | 2 +- design/design-doc-external-secret-stores.md | 2 +- design/design-doc-observe-only-resources.md | 2 +- design/design-doc-packages-v2.md | 16 ++++++------ .../design-doc-terraform-provider-runtime.md | 2 +- design/design-doc-terrajet.md | 4 +-- .../one-pager-cross-resource-referencing.md | 8 +++--- design/one-pager-helm-provider.md | 4 +-- design/one-pager-k8s-native-providers.md | 4 +-- .../one-pager-managed-resource-api-design.md | 2 +- design/proposal-controller-code-generation.md | 2 +- hack/linter-violation.tmpl | 2 +- 27 files changed, 64 insertions(+), 64 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 6f84d7d11..6ea685d8f 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -27,7 +27,7 @@ I have: Need help with this checklist? See the [cheat sheet]. -[contribution process]: https://github.com/crossplane/crossplane/tree/master/contributing +[contribution process]: https://github.com/crossplane/crossplane/tree/main/contributing [docs tracking issue]: https://github.com/crossplane/docs/issues/new [document this change]: https://docs.crossplane.io/contribute/contribute -[cheat sheet]: https://github.com/crossplane/crossplane/tree/master/contributing#checklist-cheat-sheet +[cheat sheet]: https://github.com/crossplane/crossplane/tree/main/contributing#checklist-cheat-sheet diff --git a/.github/renovate.json5 b/.github/renovate.json5 index dff4a4928..407bc82ac 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -13,7 +13,7 @@ // The branches renovate should target // PLEASE UPDATE THIS WHEN RELEASING. "baseBranches": [ - "master", + "main", "release-1.15", "release-1.16", "release-1.17" @@ -166,12 +166,12 @@ // be at the beginning, high priority at the end "packageRules": [ { - "description": "Generate code after upgrading go dependencies (master)", + "description": "Generate code after upgrading go dependencies (main)", "matchDatasources": [ "go" ], - // Currently we only have an Earthfile on master. - matchBaseBranches: ["master"], + // Currently we only have an Earthfile on main. + matchBaseBranches: ["main"], postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. "commands": [ @@ -188,7 +188,7 @@ "matchDatasources": [ "go" ], - // Currently we only have an Earthfile on master. + // Currently we only have an Earthfile on main. matchBaseBranches: ["release-.+"], postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. @@ -202,12 +202,12 @@ }, }, { - "description": "Lint code after upgrading golangci-lint (master)", + "description": "Lint code after upgrading golangci-lint (main)", "matchDepNames": [ "golangci/golangci-lint" ], - // Currently we only have an Earthfile on master. - matchBaseBranches: ["master"], + // Currently we only have an Earthfile on main. 
+ matchBaseBranches: ["main"], postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. "commands": [ @@ -224,7 +224,7 @@ "matchDepNames": [ "golangci/golangci-lint" ], - // Currently we only have an Earthfile on master. + // Currently we only have an Earthfile on main. matchBaseBranches: ["release-.+"], postUpgradeTasks: { // Post-upgrade tasks that are executed before a commit is made by Renovate. diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8ae5452e6..e872dbc4b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,7 +3,7 @@ name: CI on: push: branches: - - master + - main - release-* pull_request: {} workflow_dispatch: {} @@ -52,7 +52,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Configure Earthly to Push Cache to GitHub Container Registry - if: github.ref == 'refs/heads/master' + if: github.ref == 'refs/heads/main' run: | echo "EARTHLY_PUSH=true" >> $GITHUB_ENV echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV @@ -98,7 +98,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Configure Earthly to Push Cache to GitHub Container Registry - if: github.ref == 'refs/heads/master' + if: github.ref == 'refs/heads/main' run: | echo "EARTHLY_PUSH=true" >> $GITHUB_ENV echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV @@ -134,7 +134,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Configure Earthly to Push Cache to GitHub Container Registry - if: github.ref == 'refs/heads/master' + if: github.ref == 'refs/heads/main' run: | echo "EARTHLY_PUSH=true" >> $GITHUB_ENV echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV @@ -198,7 +198,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Configure Earthly to Push Cache to GitHub Container Registry - if: github.ref == 'refs/heads/master' + if: github.ref == 'refs/heads/main' run: | echo "EARTHLY_PUSH=true" >> $GITHUB_ENV echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV @@ -250,7 +250,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Configure Earthly to Push Cache to GitHub Container Registry - if: github.ref == 'refs/heads/master' + if: github.ref == 'refs/heads/main' run: | echo "EARTHLY_PUSH=true" >> $GITHUB_ENV echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV @@ -322,11 +322,11 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Configure Earthly to Push Cache to GitHub Container Registry - if: github.ref == 'refs/heads/master' + if: github.ref == 'refs/heads/main' run: echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - name: Configure Earthly to Push Artifacts - if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release-')) && env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' && env.AWS_USR != '' + if: (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release-')) && env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' && env.AWS_USR != '' run: echo "EARTHLY_PUSH=true" >> $GITHUB_ENV - name: Set CROSSPLANE_VERSION GitHub Environment Variable @@ -344,7 +344,7 @@ jobs: +ci-push-build-artifacts --AWS_DEFAULT_REGION=us-east-1 --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} --BUILD_DIR=${GITHUB_REF##*/} - name: Push Artifacts to https://releases.crossplane.io/master/ and https://charts.crossplane.io/master - if: env.AWS_USR != '' && github.ref == 'refs/heads/master' + if: env.AWS_USR != '' && github.ref == 'refs/heads/main' run: | earthly --strict \ --secret=AWS_ACCESS_KEY_ID=${{ secrets.AWS_USR }} \ @@ -405,14 +405,14 @@ jobs: # 
https://github.com/bufbuild/buf-push-action/issues/34 - name: Detect Breaking Changes in Protocol Buffers uses: bufbuild/buf-breaking-action@a074e988ee34efcd4927079e79c611f428354c01 # v1 - # We want to run this for the master branch, and PRs against master. - if: ${{ github.ref == 'refs/heads/master' || github.base_ref == 'master' }} + # We want to run this for the main branch, and PRs against main. + if: ${{ github.ref == 'refs/heads/main' || github.base_ref == 'main' }} with: input: apis - against: "https://github.com/${GITHUB_REPOSITORY}.git#branch=master,subdir=apis" + against: "https://github.com/${GITHUB_REPOSITORY}.git#branch=main,subdir=apis" - name: Push Protocol Buffers to Buf Schema Registry - if: ${{ github.repository == 'crossplane/crossplane' && github.ref == 'refs/heads/master' }} + if: ${{ github.repository == 'crossplane/crossplane' && github.ref == 'refs/heads/main' }} uses: bufbuild/buf-push-action@v1 with: input: apis diff --git a/ADOPTERS.md b/ADOPTERS.md index b338bfee7..4a764ac6a 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -15,7 +15,7 @@ maturity, and momentum. Thank you to all adopters and contributors of the Cross To add your organization to this list, you can choose any of the following options: 1. [Open a PR](https://github.com/crossplane/crossplane/pulls) to directly update this list, or - [edit this file](https://github.com/crossplane/crossplane/edit/master/ADOPTERS.md) directly in + [edit this file](https://github.com/crossplane/crossplane/edit/main/ADOPTERS.md) directly in Github 1. Fill out the [adopters form](https://forms.gle/dBQhiyYkYSdzXovN6) 1. Send an email to with your information for the table below diff --git a/CHARTER.md b/CHARTER.md index 77305b82e..f010b53dc 100644 --- a/CHARTER.md +++ b/CHARTER.md @@ -116,4 +116,4 @@ This is a living document. Changes to the scope, principles, or mission statement of the Crossplane project require a [majority vote][sc-voting] of the steering committee. -[sc-voting]: https://github.com/crossplane/crossplane/blob/master/GOVERNANCE.md#updating-the-governance +[sc-voting]: https://github.com/crossplane/crossplane/blob/main/GOVERNANCE.md#updating-the-governance diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index a3ee6a8a5..ea1133ab7 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,3 +1,3 @@ ## Community Code of Conduct -This project follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). +This project follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/README.md b/README.md index 6c833616a..6a6b24ca7 100644 --- a/README.md +++ b/README.md @@ -135,7 +135,7 @@ Crossplane is under the Apache 2.0 license. 
[community calendar]: https://calendar.google.com/calendar/embed?src=c_2cdn0hs9e2m05rrv1233cjoj1k%40group.calendar.google.com [releases]: https://github.com/crossplane/crossplane/releases [ADOPTERS.md]: ADOPTERS.md -[regular community meetings]: https://github.com/crossplane/crossplane/blob/master/README.md#get-involved +[regular community meetings]: https://github.com/crossplane/crossplane/blob/main/README.md#get-involved [Crossplane Roadmap]: https://github.com/orgs/crossplane/projects/20/views/9?pane=info [sig-composition-environments-slack]: https://crossplane.slack.com/archives/C05BP6QFLUW [sig-composition-functions-slack]: https://crossplane.slack.com/archives/C031Y29CSAE diff --git a/ROADMAP.md b/ROADMAP.md index 56452b295..737805b58 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -22,4 +22,4 @@ delivery timeline. [Crossplane Roadmap] [Crossplane Roadmap]: https://github.com/orgs/crossplane/projects/20/views/9?pane=info -[regular community meetings]: https://github.com/crossplane/crossplane/blob/master/README.md#get-involved +[regular community meetings]: https://github.com/crossplane/crossplane/blob/main/README.md#get-involved diff --git a/SECURITY.md b/SECURITY.md index 7663515d6..5b3504a42 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -38,7 +38,7 @@ To report a vulnerability, either: The reporter(s) can typically expect a response within 24 hours acknowledging the issue was received. If a response is not received within 24 hours, please reach out to any -[maintainer](https://github.com/crossplane/crossplane/blob/master/OWNERS.md#maintainers) +[maintainer](https://github.com/crossplane/crossplane/blob/main/OWNERS.md#maintainers) directly to confirm receipt of the issue. ### Report Content diff --git a/contributing/README.md b/contributing/README.md index ae6eb62b3..474afaace 100644 --- a/contributing/README.md +++ b/contributing/README.md @@ -846,7 +846,7 @@ func TestExample(t *testing.T) { ``` [Slack]: https://slack.crossplane.io/ -[code of conduct]: https://github.com/cncf/foundation/blob/master/code-of-conduct.md +[code of conduct]: https://github.com/cncf/foundation/blob/main/code-of-conduct.md [Earthly]: https://docs.earthly.dev [get-docker]: https://docs.docker.com/get-docker [get-earthly]: https://earthly.dev/get-earthly diff --git a/contributing/guide-adding-external-secret-stores.md b/contributing/guide-adding-external-secret-stores.md index 8f4093c40..0b14d2241 100644 --- a/contributing/guide-adding-external-secret-stores.md +++ b/contributing/guide-adding-external-secret-stores.md @@ -124,7 +124,7 @@ requires some dirty work as we need to this for all types: You can check [this commit as an example for changes in Setup functions] as an example. 
-[External Secret Stores]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-external-secret-stores.md +[External Secret Stores]: https://github.com/crossplane/crossplane/blob/main/design/design-doc-external-secret-stores.md [this PR as a complete example]: https://github.com/crossplane/provider-gcp/pull/421 [this PR instead]: https://github.com/crossplane-contrib/provider-jet-template/pull/23/commits [this commit as an example on how to add the type]: https://github.com/crossplane-contrib/provider-aws/pull/1242/commits/d8a2df323fa2489d82bf1843d2fe338de033c61d diff --git a/contributing/guide-adding-fuzz-test-cases.md b/contributing/guide-adding-fuzz-test-cases.md index e6d4a0a02..0775f5dbc 100644 --- a/contributing/guide-adding-fuzz-test-cases.md +++ b/contributing/guide-adding-fuzz-test-cases.md @@ -102,12 +102,12 @@ put in place a few configurations across different repositories: [CIFuzz]: https://google.github.io/oss-fuzz/getting-started/continuous-integration/ -[fuzz-audit-report]: https://github.com/crossplane/crossplane/blob/master/security/ADA-fuzzing-audit-22.pdf +[fuzz-audit-report]: https://github.com/crossplane/crossplane/blob/main/security/ADA-fuzzing-audit-22.pdf [go-fuzz]: https://go.dev/security/fuzz/ [oss-fuzz-arch]: https://google.github.io/oss-fuzz/architecture/ [oss-fuzz-folder]: https://github.com/google/oss-fuzz/tree/master/projects/crossplane [oss-fuzz]: https://github.com/google/oss-fuzz -[xp-ci]: https://github.com/crossplane/crossplane/blob/master/.github/workflows/ci.yml +[xp-ci]: https://github.com/crossplane/crossplane/blob/main/.github/workflows/ci.yml [xp-fuzz-tests]: https://github.com/search?q=repo%3Acrossplane%2Fcrossplane+%22func+Fuzz%22&type=code -[xp-fuzz_oss_build]: https://github.com/crossplane/crossplane/blob/master/test/fuzz/oss_fuzz_build.sh -[xp-r-fuzz_oss_build]: https://github.com/crossplane/crossplane-runtime/blob/master/test/fuzz/oss_fuzz_build.sh +[xp-fuzz_oss_build]: https://github.com/crossplane/crossplane/blob/main/test/fuzz/oss_fuzz_build.sh +[xp-r-fuzz_oss_build]: https://github.com/crossplane/crossplane-runtime/blob/main/test/fuzz/oss_fuzz_build.sh diff --git a/contributing/guide-provider-development.md b/contributing/guide-provider-development.md index 469e13f62..2a4b6c582 100644 --- a/contributing/guide-provider-development.md +++ b/contributing/guide-provider-development.md @@ -644,7 +644,7 @@ feedback you may have about the development process! [reach out]: https://github.com/crossplane/crossplane#get-involved [crossplane org]: https://github.com/crossplane [`angryjet`]: https://github.com/crossplane/crossplane-tools -[Managed Resource API Patterns]: https://github.com/crossplane/crossplane/blob/master/design/one-pager-managed-resource-api-design.md +[Managed Resource API Patterns]: https://github.com/crossplane/crossplane/blob/main/design/one-pager-managed-resource-api-design.md [Crossplane CLI]: https://github.com/crossplane/crossplane-cli#quick-start-stacks [`angryjet` documentation]: https://github.com/crossplane/crossplane-tools/blob/master/README.md [code generation guide]: https://github.com/crossplane-contrib/provider-aws/blob/master/CODE_GENERATION.md diff --git a/contributing/specifications/functions.md b/contributing/specifications/functions.md index b43b954cc..50bf81e8f 100644 --- a/contributing/specifications/functions.md +++ b/contributing/specifications/functions.md @@ -15,7 +15,7 @@ A Function MUST implement a gRPC `FunctionRunnerService` server. 
A Function SHOULD implement the latest available version of this service - e.g. `v1beta1`. The authoritative definition of this service can be found at the following URL. -https://github.com/crossplane/crossplane/tree/master/apis/apiextensions/fn/proto +https://github.com/crossplane/crossplane/tree/main/apis/apiextensions/fn/proto A Function MUST copy the tag field from a RunFunctionRequest's RequestMeta message to the ResponseMeta tag field of the corresponding RunFunctionResponse. diff --git a/design/README.md b/design/README.md index 9cd3da7dc..b0737e656 100644 --- a/design/README.md +++ b/design/README.md @@ -39,7 +39,7 @@ welcome from any member of the Crossplane community, but feedback from the elected reviewers carries extra weight. The __document status__ reflects the lifecycle of the design. Designs may be -committed to master at any stage in their lifecycle as long as the status is +committed to `main` at any stage in their lifecycle as long as the status is indicated clearly. Use one of the following statuses: * _Speculative_ designs explore an idea without _yet_ explicitly proposing a diff --git a/design/design-doc-composition-validating-webhook.md b/design/design-doc-composition-validating-webhook.md index 756db6e25..ebf44d9f7 100644 --- a/design/design-doc-composition-validating-webhook.md +++ b/design/design-doc-composition-validating-webhook.md @@ -403,7 +403,7 @@ them. Already covered in the Background section with pros and cons. [original-webhook-pr]: https://github.com/crossplane/crossplane/pull/2919 -[original-webhook-design-doc]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-webhooks.md +[original-webhook-design-doc]: https://github.com/crossplane/crossplane/blob/main/design/design-doc-webhooks.md [original-composition-validation-webhook-issue]: https://github.com/crossplane/crossplane/issues/1476 [vscode-plugin]: https://github.com/upbound/vscode-up [upbound/up]: https://github.com/upbound/up/blob/main/internal/xpkg/snapshot/composition.go#L66 diff --git a/design/design-doc-external-secret-stores.md b/design/design-doc-external-secret-stores.md index 711eb1b2f..b3f5df333 100644 --- a/design/design-doc-external-secret-stores.md +++ b/design/design-doc-external-secret-stores.md @@ -635,6 +635,6 @@ related discussion or issue._ [Vault agent inject template]: https://learn.hashicorp.com/tutorials/vault/kubernetes-sidecar#apply-a-template-to-the-injected-secrets [ArgoCD cluster]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#clusters [AWS secret manager]: https://aws.amazon.com/secrets-manager/ -[provider-aws Secret resource]: https://github.com/crossplane/provider-aws/blob/master/examples/secretsmanager/secret.yaml +[provider-aws Secret resource]: https://github.com/crossplane-contrib/provider-aws/blob/master/examples/secretsmanager/secret.yaml [GenericSecret]: https://registry.terraform.io/providers/hashicorp/vault/latest/docs/resources/generic_secret [kubernetes-sigs/secrets-store-csi-driver]: https://github.com/kubernetes-sigs/secrets-store-csi-driver diff --git a/design/design-doc-observe-only-resources.md b/design/design-doc-observe-only-resources.md index 04ba953ec..aab2993f6 100644 --- a/design/design-doc-observe-only-resources.md +++ b/design/design-doc-observe-only-resources.md @@ -716,5 +716,5 @@ it as a migration path to Crossplane. 
[most recent AMI]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami#most_recent [desired tags]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc#tags [passing sensitive configuration]: https://github.com/crossplane/crossplane/pull/2886#discussion_r862615416 -[`type: Webhook` composition function]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-composition-functions.md#using-webhooks-to-run-functions +[`type: Webhook` composition function]: https://github.com/crossplane/crossplane/blob/main/design/design-doc-composition-functions.md#using-webhooks-to-run-functions [ignore changes]: https://github.com/crossplane/crossplane/blob/ad0ff7d6d0e4850168883905ed8e1509089cea15/design/one-pager-ignore-changes.md \ No newline at end of file diff --git a/design/design-doc-packages-v2.md b/design/design-doc-packages-v2.md index 65fd360e3..e8268302b 100644 --- a/design/design-doc-packages-v2.md +++ b/design/design-doc-packages-v2.md @@ -11,21 +11,21 @@ there are many other packaging formats in the Kubernetes ecosystem, Crossplane supports its own for the following reasons: - Crossplane is [opinionated about the capabilities of a - controller](https://github.com/crossplane/crossplane/blob/master/design/one-pager-packages-security-isolation.md#allowed-resource-access) + controller](https://github.com/crossplane/crossplane/blob/main/design/one-pager-packages-security-isolation.md#allowed-resource-access) that can be installed to extend its functionality. For instance, controllers [may not run as - root](https://github.com/crossplane/crossplane/blob/master/design/one-pager-packages-security-isolation.md#package-deployment-privileges) + root](https://github.com/crossplane/crossplane/blob/main/design/one-pager-packages-security-isolation.md#package-deployment-privileges) or request cluster admin RBAC. - Crossplane [allocates and aggregates various - ClusterRoles](https://github.com/crossplane/crossplane/blob/master/design/one-pager-packages-security-isolation.md#crossplane-clusterroles--rbac) + ClusterRoles](https://github.com/crossplane/crossplane/blob/main/design/one-pager-packages-security-isolation.md#crossplane-clusterroles--rbac) to automatically provide permissions for users in the Kubernetes cluster to view / edit / create / delete CRDs installed by a package. - Crossplane guards against conflicting CRDs being installed into a cluster. - Crossplane adds [additional metadata to - CRDs](https://github.com/crossplane/crossplane/blob/master/design/one-pager-stack-ui-metadata.md#crd-annotation-example) + CRDs](https://github.com/crossplane/crossplane/blob/main/design/one-pager-stack-ui-metadata.md#crd-annotation-example) to provide additional context for displaying their configuration in a UI. - Crossplane [adds labels to - CRDs](https://github.com/crossplane/crossplane/blob/master/design/one-pager-stack-relationship-labels.md#example-wordpress-crdyaml-parented-by-stackinstall) + CRDs](https://github.com/crossplane/crossplane/blob/main/design/one-pager-stack-relationship-labels.md#example-wordpress-crdyaml-parented-by-stackinstall) in an attempt to establish parent-child relationships between CRDs. In addition, the following unimplemented features are goals of the Crossplane @@ -56,12 +56,12 @@ effective than desired in practice. 
The current package infrastructure, though well thought out, has become somewhat convoluted and redundant with the introduction of -[composition](https://github.com/crossplane/crossplane/blob/master/design/design-doc-composition.md) +[composition](https://github.com/crossplane/crossplane/blob/main/design/design-doc-composition.md) into the Crossplane ecosystem. Composition solves the following goals originally intended to be addressed by a `PackageInstall` and [template -stacks](https://github.com/crossplane/crossplane/blob/master/design/one-pager-resource-packs.md): +stacks](https://github.com/crossplane/crossplane/blob/main/design/one-pager-resource-packs.md): - Ability to publish infrastructure abstractions to specific namespaces. - The `PackageInstall` allowed packages to install a namespace-scoped CRD and @@ -175,7 +175,7 @@ workflow the package manager uses for installing a `Package`. We will use a [syncing](https://github.com/crossplane/crossplane/blob/6fc50822fbf11a7d31f8a9dabde5c8948c3b36ac/pkg/controller/packages/pkg/pkg.go#L696) the `Secret` for the `ServiceAccount` that are required for running the controller in [host aware - mode](https://github.com/crossplane/crossplane/blob/master/design/one-pager-host-aware-stack-manager.md). + mode](https://github.com/crossplane/crossplane/blob/main/design/one-pager-host-aware-stack-manager.md). The process for a `PackageInstall` is very similar, but the packages using the templating controller have the additional step of first producing a diff --git a/design/design-doc-terraform-provider-runtime.md b/design/design-doc-terraform-provider-runtime.md index 814e84da1..8a4c7991a 100644 --- a/design/design-doc-terraform-provider-runtime.md +++ b/design/design-doc-terraform-provider-runtime.md @@ -372,7 +372,7 @@ between: 1. specifying structured metadata describing references to the code generator 2. add reference handling callbacks to the `plugin.Implementation` scheme. These could plug into the `Reconciler` via the - [ReferenceResolver](https://github.com/crossplane/crossplane-runtime/blob/master/pkg/reconciler/managed/reconciler.go#L143) + [ReferenceResolver](https://github.com/crossplane/crossplane-runtime/blob/main/pkg/reconciler/managed/reconciler.go#L143) interface. 3. building a reference implementation that does not need to know how resources can refer to each other diff --git a/design/design-doc-terrajet.md b/design/design-doc-terrajet.md index acaf33e13..2f5a1e0d7 100644 --- a/design/design-doc-terrajet.md +++ b/design/design-doc-terrajet.md @@ -553,9 +553,9 @@ license restrictions. 
[resolve-references-example]: https://github.com/crossplane/provider-aws/blob/c269977/apis/apigatewayv2/v1alpha1/referencers.go#L30 [resolve-references]: https://github.com/crossplane/crossplane-runtime/blob/f2440d9/pkg/reference/reference.go#L105 [dcl]: https://github.com/GoogleCloudPlatform/declarative-resource-client-library/blob/338dce1/services/google/compute/firewall_policy_rule.go#L321 -[ack-codegen]: https://github.com/crossplane/provider-aws/blob/master/CODE_GENERATION.md +[ack-codegen]: https://github.com/crossplane-contrib/provider-aws/blob/master/CODE_GENERATION.md [crossplane-tools]: https://github.com/crossplane/crossplane-tools/ -[ack-guide]: https://github.com/crossplane/provider-aws/blob/master/CODE_GENERATION.md +[ack-guide]: https://github.com/crossplane-contrib/provider-aws/blob/master/CODE_GENERATION.md [secret-key-selector]: https://github.com/crossplane/crossplane-runtime/blob/36fc69eff96ecb5856f156fec077ed3f3c3b30b1/apis/common/v1/resource.go#L72 [instance-state]: https://github.com/hashicorp/terraform-plugin-sdk/blob/0e34772/helper/schema/resource.go#L859 [resource-data]: https://github.com/hashicorp/terraform-plugin-sdk/blob/0e34772dad547d6b69148f57d95b324af9929542/helper/schema/resource_data.go#L22 diff --git a/design/one-pager-cross-resource-referencing.md b/design/one-pager-cross-resource-referencing.md index 9d6967936..34d40f177 100644 --- a/design/one-pager-cross-resource-referencing.md +++ b/design/one-pager-cross-resource-referencing.md @@ -281,15 +281,15 @@ dependent objects are deleted first. * [Honoring inter-resource dependency when creating/deleting resources](https://github.com/crossplane/crossplane/issues/708) * [Resource - Connectivity](https://github.com/crossplane/crossplane/blob/master/design/one-pager-resource-connectivity-mvp.md) + Connectivity](https://github.com/crossplane/crossplane/blob/main/design/one-pager-resource-connectivity-mvp.md) [gitops doc]: (https://www.weave.works/blog/what-is-gitops-really) [Subnet type]: -(https://github.com/crossplane/provider-aws/blob/master/apis/network/v1alpha2/subnet_types.go#L25-L37) +(https://github.com/crossplane-contrib/provider-aws/blob/master/apis/network/v1alpha2/subnet_types.go#L25-L37) [Subnetwork type]: -(https://github.com/crossplane/provider-gcp/blob/master/apis/compute/v1alpha2/subnetwork_types.go#L144) +(https://github.com/crossplane-contrib/provider-gcp/blob/master/apis/compute/v1alpha2/subnetwork_types.go#L144) [Managed Reconciler]: -https://github.com/crossplane/crossplane-runtime/blob/master/pkg/reconciler/managed/reconciler.go +https://github.com/crossplane/crossplane-runtime/blob/main/pkg/reconciler/managed/reconciler.go [Foreground cascading deletion]: (https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#foreground-cascading-deletion) diff --git a/design/one-pager-helm-provider.md b/design/one-pager-helm-provider.md index e4072681c..a17afee7c 100644 --- a/design/one-pager-helm-provider.md +++ b/design/one-pager-helm-provider.md @@ -26,8 +26,8 @@ clients/libraries for helm), **we will focus and only support Helm 3**. We will implement a Kubernetes controller watching `Release` custom resources and deploying helm charts with the desired configuration. Since this controller needs to interact with Kubernetes API server, it is a good fit for [Kubernetes -native providers](https://github.com/crossplane/crossplane/blob/master/design/one-pager-k8s-native-providers.md#kubernetes-native-providers) -concept in Crossplane. 
By using existing [Kubernetes Provider](https://github.com/crossplane/crossplane/blob/master/design/one-pager-k8s-native-providers.md#proposal-kubernetes-provider-kind) +native providers](https://github.com/crossplane/crossplane/blob/main/design/one-pager-k8s-native-providers.md#kubernetes-native-providers) +concept in Crossplane. By using existing [Kubernetes Provider](https://github.com/crossplane/crossplane/blob/main/design/one-pager-k8s-native-providers.md#proposal-kubernetes-provider-kind) Kind, we will be able to manage helm releases in **Crossplane provisioned external clusters**, **existing external clusters** and also **Crossplane control cluster** (a.k.a. local cluster). diff --git a/design/one-pager-k8s-native-providers.md b/design/one-pager-k8s-native-providers.md index b4ae23641..379c6632d 100644 --- a/design/one-pager-k8s-native-providers.md +++ b/design/one-pager-k8s-native-providers.md @@ -554,8 +554,8 @@ func (c *ClusterController) SetupWithManager(mgr ctrl.Manager) error { [Crossplane CLI]: https://github.com/crossplane/crossplane-cli [client-go]: https://github.com/kubernetes/client-go [managed reconciler]: https://github.com/crossplane/crossplane/blob/14fa6dda6a3e91d5f1ac98d1020a151b02311cb1/pkg/controller/workload/kubernetes/resource/resource.go#L401 -[claim reconciler]: https://github.com/crossplane/crossplane-runtime/blob/master/pkg/resource/claim_reconciler.go -[scheduler controller]: https://github.com/crossplane/crossplane/blob/master/pkg/controller/workload/kubernetes/scheduler/scheduler.go +[claim reconciler]: https://github.com/crossplane/crossplane-runtime/blob/main/pkg/resource/claim_reconciler.go +[scheduler controller]: https://github.com/crossplane/crossplane/blob/main/pkg/controller/workload/kubernetes/scheduler/scheduler.go [crossplane-runtime]: https://github.com/crossplane/crossplane-runtime [crossplane-runtime #22]: https://github.com/crossplane/crossplane-runtime/issues/22 [crossplane-runtime #34]: https://github.com/crossplane/crossplane-runtime/issues/34 diff --git a/design/one-pager-managed-resource-api-design.md b/design/one-pager-managed-resource-api-design.md index 26d46753a..fedc7a6f2 100644 --- a/design/one-pager-managed-resource-api-design.md +++ b/design/one-pager-managed-resource-api-design.md @@ -706,7 +706,7 @@ adding a field about that sync status and reconciler can mark the sync status in one of the `Condition`s we already have or add a new one. [package]: https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/eks -[terminology]: https://github.com/crossplane/crossplane/blob/master/docs/concepts/terminology.md +[terminology]: https://github.com/crossplane/crossplane/blob/main/docs/concepts/terminology.md [from crossplane-runtime]: https://github.com/crossplane/crossplane-runtime/blob/ca4b6b4/apis/core/v1alpha1/resource.go#L77 [Kubernetes API Conventions - Spec and Status]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status [managed reconciler]: https://github.com/crossplane/crossplane-runtime/blob/84e629b9589852df1322ff1eae4c6e7639cf6e99/pkg/reconciler/managed/reconciler.go#L637 diff --git a/design/proposal-controller-code-generation.md b/design/proposal-controller-code-generation.md index ee0ec014f..4a8d3159d 100644 --- a/design/proposal-controller-code-generation.md +++ b/design/proposal-controller-code-generation.md @@ -326,7 +326,7 @@ other supported languages inc javascript, python and .NET. 
[Magic Moduels]: https://github.com/GoogleCloudPlatform/magic-modules [OpenAPI Directory]: https://github.com/APIs-guru/openapi-directory [Pulumi]: https://github.com/pulumi/pulumi -[Reference Resolvers]: https://github.com/crossplane/crossplane/blob/master/design/one-pager-cross-resource-referencing.md +[Reference Resolvers]: https://github.com/crossplane/crossplane/blob/main/design/one-pager-cross-resource-referencing.md [alibaba-ros]: https://www.alibabacloud.com/product/ros [amazon-aso]: https://github.com/aws/aws-service-operator-k8s [aso-v2-architecture]: https://github.com/jaypipes/aws-service-operator-k8s/blob/91e63414efb00564662adf6eaafc20e124a3b2d3/docs/code-generation.md diff --git a/hack/linter-violation.tmpl b/hack/linter-violation.tmpl index 1a91dfa06..d1f43403f 100644 --- a/hack/linter-violation.tmpl +++ b/hack/linter-violation.tmpl @@ -1,3 +1,3 @@ `{{violation.rule}}`: {{violation.message}} -Refer to Crossplane's [coding style documentation](https://github.com/crossplane/crossplane/blob/master/CONTRIBUTING.md#coding-style-and-linting) for more information. \ No newline at end of file +Refer to Crossplane's [coding style documentation](https://github.com/crossplane/crossplane/blob/main/CONTRIBUTING.md#coding-style-and-linting) for more information. \ No newline at end of file From 972284c372ee31be2b6117dd6a0055068de818b0 Mon Sep 17 00:00:00 2001 From: Jean du Plessis Date: Tue, 3 Sep 2024 16:20:10 +0200 Subject: [PATCH 360/370] Updates e2e-framework dependency to latest version Signed-off-by: Jean du Plessis --- go.mod | 14 +++++++------- go.sum | 10 ++++++++++ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 11a68403a..0f0d1237c 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/crossplane/crossplane -go 1.22.0 +go 1.22.3 -toolchain go1.22.3 +toolchain go1.22.5 require ( dario.cat/mergo v1.0.0 @@ -28,19 +28,19 @@ require ( google.golang.org/grpc v1.63.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/protobuf v1.34.2 - k8s.io/api v0.30.0 + k8s.io/api v0.30.1 k8s.io/apiextensions-apiserver v0.30.0 - k8s.io/apimachinery v0.30.0 + k8s.io/apimachinery v0.30.1 k8s.io/apiserver v0.30.0 k8s.io/cli-runtime v0.29.1 - k8s.io/client-go v0.30.0 + k8s.io/client-go v0.30.1 k8s.io/code-generator v0.30.0 k8s.io/kubectl v0.29.1 k8s.io/metrics v0.29.1 k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/controller-runtime v0.18.2 sigs.k8s.io/controller-tools v0.14.0 - sigs.k8s.io/e2e-framework v0.3.0 + sigs.k8s.io/e2e-framework v0.4.0 sigs.k8s.io/kind v0.20.0 sigs.k8s.io/yaml v1.4.0 ) @@ -197,7 +197,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.30.0 // indirect + k8s.io/component-base v0.30.1 // indirect k8s.io/klog/v2 v2.120.1 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/go.sum b/go.sum index 5dbee703d..7b35b728f 100644 --- a/go.sum +++ b/go.sum @@ -638,20 +638,28 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod 
h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs= k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y= k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= k8s.io/apiserver v0.30.0 h1:QCec+U72tMQ+9tR6A0sMBB5Vh6ImCEkoKkTDRABWq6M= k8s.io/apiserver v0.30.0/go.mod h1:smOIBq8t0MbKZi7O7SyIpjPsiKJ8qa+llcFCluKyqiY= k8s.io/cli-runtime v0.29.1 h1:By3WVOlEWYfyxhGko0f/IuAOLQcbBSMzwSaDren2JUs= k8s.io/cli-runtime v0.29.1/go.mod h1:vjEY9slFp8j8UoMhV5AlO8uulX9xk6ogfIesHobyBDU= k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/code-generator v0.30.0 h1:3VUVqHvWFSVSm9kqL/G6kD4ZwNdHF6J/jPyo3Jgjy3k= k8s.io/code-generator v0.30.0/go.mod h1:mBMZhfRR4IunJUh2+7LVmdcWwpouCH5+LNPkZ3t/v7Q= k8s.io/component-base v0.30.0 h1:cj6bp38g0ainlfYtaOQuRELh5KSYjhKxM+io7AUIk4o= k8s.io/component-base v0.30.0/go.mod h1:V9x/0ePFNaKeKYA3bOvIbrNoluTSG+fSJKjLdjOoeXQ= +k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= +k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= @@ -672,6 +680,8 @@ sigs.k8s.io/controller-tools v0.14.0 h1:rnNoCC5wSXlrNoBKKzL70LNJKIQKEzT6lloG6/LF sigs.k8s.io/controller-tools v0.14.0/go.mod h1:TV7uOtNNnnR72SpzhStvPkoS/U5ir0nMudrkrC4M9Sc= sigs.k8s.io/e2e-framework v0.3.0 h1:eqQALBtPCth8+ulTs6lcPK7ytV5rZSSHJzQHZph4O7U= sigs.k8s.io/e2e-framework v0.3.0/go.mod h1:C+ef37/D90Dc7Xq1jQnNbJYscrUGpxrWog9bx2KIa+c= +sigs.k8s.io/e2e-framework v0.4.0 h1:4yYmFDNNoTnazqmZJXQ6dlQF1vrnDbutmxlyvBpC5rY= +sigs.k8s.io/e2e-framework v0.4.0/go.mod h1:JilFQPF1OL1728ABhMlf9huse7h+uBJDXl9YeTs49A8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.20.0 h1:f0sc3v9mQbGnjBUaqSFST1dwIuiikKVGgoTwpoP33a8= From 3417cc6b534da87a0e372b618a64947ed94ec3b7 Mon Sep 17 00:00:00 2001 From: Jean du Plessis Date: Tue, 3 Sep 2024 16:40:02 +0200 Subject: [PATCH 361/370] Update e2e tests by adding a description for features.New calls. 
Signed-off-by: Jean du Plessis --- test/e2e/README.md | 46 ++++++++++++++------------- test/e2e/apiextensions_test.go | 37 ++++++--------------- test/e2e/environmentconfig_test.go | 12 +++---- test/e2e/install_test.go | 11 ++----- test/e2e/pkg_test.go | 16 +++------- test/e2e/realtimecompositions_test.go | 2 +- test/e2e/usage_test.go | 2 +- 7 files changed, 49 insertions(+), 77 deletions(-) diff --git a/test/e2e/README.md b/test/e2e/README.md index 08d27c758..8293338f9 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -115,7 +115,7 @@ We try to follow this pattern when adding a new test: `github.com/crossplane/crossplane/test/e2e/funcs`, or add new ones there if needed. 1. Prefer using the Fluent APIs to define features - (`features.New(...).WithSetup(...).Assess(...).WithTeardown(...).Feature()`). + (`features.NewWithDescription(...).WithSetup(...).AssessWithDescription(...).WithTeardown(...).Feature()`). 1. `features.Table` should be used only to define multiple self-contained assessments to be run sequentially, but without assuming any ordering among them, similarly to the usual table driven style we adopt for unit testing. @@ -124,8 +124,8 @@ We try to follow this pattern when adding a new test: a feature, as they allow to provide a description. 1. Use short but explicative `CamelCase` sentences as descriptions for everything used to define the name of tests/subtests, e.g. - `features.New("CrossplaneUpgrade", ...)` `WithSetup("InstallProviderNop", - ...)`, `Assess("ProviderNopIsInstalled", ...)`, + `features.NewWithDescription("CrossplaneUpgrade", ...)` `WithSetup("InstallProviderNop", + ...)`, `AssessWithDescription("ProviderNopIsInstalled", ...)`, `WithTeardown("UninstallProviderNop", ...)`. 1. Use the `Setup` and `Teardown` phases to define respectively actions that are not strictly part of the feature being tested, but are needed to make it @@ -163,29 +163,31 @@ Here an example of a test following the above guidelines: ```go package e2e +import "sigs.k8s.io/e2e-framework/pkg/features" + // ... // TestSomeFeature ... func TestSomeFeature(t *testing.T) { - manifests := "test/e2e/manifests/pkg/some-area/some-feature" - namespace := "some-namespace" - // ... other variables or constants ... - - environment.Test(t, - features.New(t.Name()). - WithLabel(LabelArea, ...). - WithLabel(LabelSize, ...). - WithLabel(config.LabelTestSuite, config.TestSuiteDefault). - // ... - WithSetup("ReadyPrerequisites", ... ). - // ... other setup steps ... - Assess("DoSomething", ... ). - Assess("SomethingElseIsInSomeState", ... ). - // ... other assess steps ... - WithTeardown("DeleteCreatedResources", ...). - // ... other teardown steps ... - Feature(), - ) + manifests := "test/e2e/manifests/pkg/some-area/some-feature" + namespace := "some-namespace" + // ... other variables or constants ... + + environment.Test(t, + features.NewWithDescription(t.Name(), ...). + WithLabel(LabelArea, ...). + WithLabel(LabelSize, ...). + WithLabel(config.LabelTestSuite, config.TestSuiteDefault). + // ... + WithSetup("ReadyPrerequisites", ...). + // ... other setup steps ... + AssessWithDescription("DoSomething", ...). + AssessWithDescription("SomethingElseIsInSomeState", ...). + // ... other assess steps ... + WithTeardown("DeleteCreatedResources", ...). + // ... other teardown steps ... + Feature(), + ) } // ... 
diff --git a/test/e2e/apiextensions_test.go b/test/e2e/apiextensions_test.go index faeb3f27c..b69d27196 100644 --- a/test/e2e/apiextensions_test.go +++ b/test/e2e/apiextensions_test.go @@ -60,10 +60,6 @@ var nopList = composed.NewList(composed.FromReferenceToList(corev1.ObjectReferen Kind: "NopResource", })) -// TestCompositionMinimal tests Crossplane's Composition functionality, -// checking that a claim using a very minimal Composition (with no patches, -// transforms, or functions) will become available when its composed -// resources do. func TestCompositionMinimal(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/minimal" @@ -77,7 +73,7 @@ func TestCompositionMinimal(t *testing.T) { })) environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests Crossplane's Composition functionality, checking that a claim using a very minimal Composition (with no patches, transforms, or functions) will become available when its composed resources do."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -104,9 +100,6 @@ func TestCompositionMinimal(t *testing.T) { ) } -// TestCompositionInvalidComposed tests Crossplane's Composition functionality, -// checking that although a composed resource is invalid, i.e. it didn't apply -// successfully. func TestCompositionInvalidComposed(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/invalid-composed" @@ -119,7 +112,7 @@ func TestCompositionInvalidComposed(t *testing.T) { })) environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests Crossplane's Composition functionality, checking that although a composed resource is invalid, i.e. it didn't apply successfully."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -148,14 +141,10 @@ func TestCompositionInvalidComposed(t *testing.T) { ) } -// TestCompositionPatchAndTransform tests Crossplane's Composition functionality, -// checking that a claim using patch-and-transform Composition will become -// available when its composed resources do, and have a field derived from -// the patch. func TestCompositionPatchAndTransform(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/patch-and-transform" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests Crossplane's Composition functionality, checking that a claim using patch-and-transform Composition will become available when its composed resources do, and have a field derived from the patch."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -183,13 +172,10 @@ func TestCompositionPatchAndTransform(t *testing.T) { ) } -// TestCompositionRealtimeRevisionSelection tests Crossplane's Composition -// functionality to react in realtime to changes in a Composition by selecting -// the new CompositionRevision and reconcile the XRs. func TestCompositionRealtimeRevisionSelection(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/realtime-revision-selection" environment.Test(t, - features.New(t.Name()). 
+ features.NewWithDescription(t.Name(), "Tests Crossplane's Composition functionality to react in realtime to changes in a Composition by selecting the new CompositionRevision and reconcile the XRs."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -224,7 +210,7 @@ func TestCompositionRealtimeRevisionSelection(t *testing.T) { func TestCompositionFunctions(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/functions" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests the correct functioning of composition functions ensuring that the composed resources are created, conditions are met, fields are patched, and resources are properly cleaned up when deleted."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -255,7 +241,7 @@ func TestCompositionFunctions(t *testing.T) { func TestPropagateFieldsRemovalToXR(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/propagate-field-removals" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that field removals in a claim are correctly propagated to the associated composite resource (XR), ensuring that updates and deletions are properly synchronized, and that the status from the XR is accurately reflected back to the claim."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). @@ -309,7 +295,7 @@ func TestPropagateFieldsRemovalToXR(t *testing.T) { func TestPropagateFieldsRemovalToXRAfterUpgrade(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/propagate-field-removals" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that field removals in a composite resource (XR) are correctly propagated after upgrading the field managers from CSA to SSA, verifying that the upgrade process does not interfere with the synchronization of fields between the claim and the XR."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). @@ -374,13 +360,10 @@ func TestPropagateFieldsRemovalToXRAfterUpgrade(t *testing.T) { ) } -// TestPropagateFieldsRemovalToComposed tests Crossplane's end-to-end SSA syncing -// functionality of clear propagation of fields from claim->XR->MR, when existing -// composition and resources are migrated from native P-and-T to functions pipeline mode. func TestPropagateFieldsRemovalToComposed(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/propagate-field-removals" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests Crossplane's end-to-end SSA syncing functionality of clear propagation of fields from claim->XR->MR, when existing composition and resources are migrated from native P-and-T to functions pipeline mode."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). 
@@ -426,7 +409,7 @@ func TestPropagateFieldsRemovalToComposed(t *testing.T) { func TestCompositionSelection(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/composition-selection" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that label selectors in a claim are correctly propagated to the composite resource (XR), ensuring that the appropriate composition is selected and remains consistent even after updates to the label selectors."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). @@ -485,7 +468,7 @@ func TestCompositionSelection(t *testing.T) { func TestBindToExistingXR(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/bind-existing-xr" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that a new claim can successfully bind to an existing composite resource (XR), ensuring that the XR’s fields are updated according to the claim’s specifications and that the XR is correctly managed when the claim is deleted."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). diff --git a/test/e2e/environmentconfig_test.go b/test/e2e/environmentconfig_test.go index f69fd1661..d83832d8a 100644 --- a/test/e2e/environmentconfig_test.go +++ b/test/e2e/environmentconfig_test.go @@ -49,7 +49,7 @@ func TestEnvironmentConfigDefault(t *testing.T) { subfolder := "default" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that environment config defaults are correctly applied and annotated in managed resources."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). @@ -101,7 +101,7 @@ func TestEnvironmentResolutionOptional(t *testing.T) { subfolder := "resolutionOptional" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that optional environment resolution works correctly, ensuring that managed resources are properly annotated with values derived from environment config."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). @@ -153,7 +153,7 @@ func TestEnvironmentResolveIfNotPresent(t *testing.T) { subfolder := "resolveIfNotPresent" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that environment configurations are resolved and applied only if they are not already present, verifying that additional environment configurations do not override existing annotations."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). @@ -215,7 +215,7 @@ func TestEnvironmentResolveAlways(t *testing.T) { subfolder := "resolveAlways" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that environment configurations are always resolved and applied to managed resources, ensuring that any changes in environment configurations are reflected in the annotations of the managed resources."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). 
@@ -277,7 +277,7 @@ func TestEnvironmentConfigMultipleMaxMatchNil(t *testing.T) { subfolder := "multipleModeMaxMatchNil" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that when multiple environment configurations are available, the correct maximum match is selected and applied, ensuring that the managed resources are annotated correctly."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). @@ -329,7 +329,7 @@ func TestEnvironmentConfigMultipleMaxMatch1(t *testing.T) { subfolder := "multipleModeMaxMatch1" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that when multiple environment configurations are available, the configuration with the highest match is correctly selected and applied to the managed resources."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). diff --git a/test/e2e/install_test.go b/test/e2e/install_test.go index 64e449cef..6b9d3844d 100644 --- a/test/e2e/install_test.go +++ b/test/e2e/install_test.go @@ -40,13 +40,6 @@ const LabelAreaLifecycle = "lifecycle" const TestSuiteLifecycle = "lifecycle" -// TestCrossplaneLifecycle tests two features expecting them to be run in order: -// - CrossplaneUninstall: Test that it's possible to cleanly uninstall Crossplane, even -// after having created and deleted a claim. -// - CrossplaneUpgrade: Test that it's possible to upgrade Crossplane from the most recent -// stable Helm chart to the one we're testing, even when a claim exists. This -// expects Crossplane not to be installed. -// // Note: First time Installation is tested as part of the environment setup, // if not disabled explicitly. func TestCrossplaneLifecycle(t *testing.T) { @@ -54,7 +47,7 @@ func TestCrossplaneLifecycle(t *testing.T) { environment.Test(t, // Test that it's possible to cleanly uninstall Crossplane, even after // having created and deleted a claim. - features.New(t.Name()+"Uninstall"). + features.NewWithDescription(t.Name()+"Uninstall", "Test that it's possible to cleanly uninstall Crossplane, even after having created and deleted a claim."). WithLabel(LabelArea, LabelAreaLifecycle). WithLabel(LabelSize, LabelSizeSmall). WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). @@ -97,7 +90,7 @@ func TestCrossplaneLifecycle(t *testing.T) { funcs.ResourceDeletedWithin(3*time.Minute, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}), )). Feature(), - features.New(t.Name()+"Upgrade"). + features.NewWithDescription(t.Name()+"Upgrade", "Test that it's possible to upgrade Crossplane from the most recent stable Helm chart to the one we're testing, even when a claim exists. This expects Crossplane not to be installed."). WithLabel(LabelArea, LabelAreaLifecycle). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). diff --git a/test/e2e/pkg_test.go b/test/e2e/pkg_test.go index 9355b0a72..34dc6daa4 100644 --- a/test/e2e/pkg_test.go +++ b/test/e2e/pkg_test.go @@ -37,13 +37,11 @@ import ( // Providers, Configurations, etc). const LabelAreaPkg = "pkg" -// TestConfigurationPullFromPrivateRegistry tests that a Configuration can be -// installed from a private registry using a package pull secret. 
func TestConfigurationPullFromPrivateRegistry(t *testing.T) { manifests := "test/e2e/manifests/pkg/configuration/private" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that a Configuration can be installed from a private registry using a package pull secret."). WithLabel(LabelArea, LabelAreaPkg). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -59,13 +57,11 @@ func TestConfigurationPullFromPrivateRegistry(t *testing.T) { ) } -// TestConfigurationWithDependency tests that a Configuration with a dependency -// on a Provider will become healthy when the Provider becomes healthy. func TestConfigurationWithDependency(t *testing.T) { manifests := "test/e2e/manifests/pkg/configuration/dependency" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that a Configuration with a dependency on a Provider will become healthy when the Provider becomes healthy."). WithLabel(LabelArea, LabelAreaPkg). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -90,12 +86,10 @@ func TestConfigurationWithDependency(t *testing.T) { } func TestProviderUpgrade(t *testing.T) { - // Test that we can upgrade a provider to a new version, even when a managed - // resource has been created. manifests := "test/e2e/manifests/pkg/provider" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that we can upgrade a provider to a new version, even when a managed resource has been created."). WithLabel(LabelArea, LabelAreaPkg). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -126,7 +120,7 @@ func TestProviderUpgrade(t *testing.T) { func TestDeploymentRuntimeConfig(t *testing.T) { manifests := "test/e2e/manifests/pkg/deployment-runtime-config" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that custom configurations in the deployment runtime do not disrupt the functionality of the resources, ensuring that deployments, services, and service accounts are created and configured correctly according to the specified runtime settings."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -193,7 +187,7 @@ func TestDeploymentRuntimeConfig(t *testing.T) { func TestExternallyManagedServiceAccount(t *testing.T) { manifests := "test/e2e/manifests/pkg/externally-managed-service-account" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that an externally managed service account is not owned by the deployment while verifying that the deployment correctly references the service account as specified in the runtime configuration."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). diff --git a/test/e2e/realtimecompositions_test.go b/test/e2e/realtimecompositions_test.go index ad49d5c59..f206c57fd 100644 --- a/test/e2e/realtimecompositions_test.go +++ b/test/e2e/realtimecompositions_test.go @@ -54,7 +54,7 @@ func TestRealtimeCompositions(t *testing.T) { withTestLabels := resources.WithLabelSelector(labels.FormatLabels(map[string]string{"realtime-compositions": "true"})) environment.Test(t, - features.New(t.Name()). 
+ features.NewWithDescription(t.Name(), "Tests scenarios for compositions with realtime reconciles through MR updates."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). diff --git a/test/e2e/usage_test.go b/test/e2e/usage_test.go index 065c504b7..ec69c7940 100644 --- a/test/e2e/usage_test.go +++ b/test/e2e/usage_test.go @@ -127,7 +127,7 @@ func TestUsageComposition(t *testing.T) { })) environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests scenarios for Crossplane's `Usage` resource as part of a composition."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). From ae79fa4bbcfe592cd17da1a4863d4c93436dbef7 Mon Sep 17 00:00:00 2001 From: Jean du Plessis Date: Tue, 3 Sep 2024 16:54:11 +0200 Subject: [PATCH 362/370] Add generated files from earthly +generate Signed-off-by: Jean du Plessis --- ....crossplane.io_configurationrevisions.yaml | 7 +- .../pkg.crossplane.io_configurations.yaml | 7 +- .../pkg.crossplane.io_controllerconfigs.yaml | 112 ++++++++++-- ...rossplane.io_deploymentruntimeconfigs.yaml | 168 +++++++++++++++--- .../pkg.crossplane.io_functionrevisions.yaml | 14 +- cluster/crds/pkg.crossplane.io_functions.yaml | 14 +- .../pkg.crossplane.io_providerrevisions.yaml | 7 +- cluster/crds/pkg.crossplane.io_providers.yaml | 7 +- go.sum | 10 -- 9 files changed, 288 insertions(+), 58 deletions(-) diff --git a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml index d1b5ef789..bc8c4683f 100644 --- a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml @@ -113,10 +113,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic diff --git a/cluster/crds/pkg.crossplane.io_configurations.yaml b/cluster/crds/pkg.crossplane.io_configurations.yaml index 68281a426..325aacd96 100644 --- a/cluster/crds/pkg.crossplane.io_configurations.yaml +++ b/cluster/crds/pkg.crossplane.io_configurations.yaml @@ -99,10 +99,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
type: string type: object x-kubernetes-map-type: atomic diff --git a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml index 50dbff689..e5313498e 100644 --- a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml @@ -1025,10 +1025,15 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap or its key @@ -1087,10 +1092,15 @@ spec: be a valid secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret or its key must @@ -1120,10 +1130,15 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap must be defined @@ -1138,10 +1153,15 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret must be defined @@ -1179,10 +1199,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -2000,10 +2025,15 @@ spec: More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -2039,10 +2069,15 @@ spec: to OpenStack. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -2108,10 +2143,15 @@ spec: type: array x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional specify whether the ConfigMap or its @@ -2144,10 +2184,15 @@ spec: secret object contains more than one secret, all secret references are passed. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -2641,10 +2686,15 @@ spec: scripts. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -2836,10 +2886,15 @@ spec: and initiator authentication properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -3101,10 +3156,15 @@ spec: type: array x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional specify whether the ConfigMap @@ -3236,10 +3296,15 @@ spec: type: array x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional field specify whether the @@ -3370,10 +3435,15 @@ spec: More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it properties: name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -3417,10 +3487,15 @@ spec: sensitive information. If this is not provided, Login operation will fail. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -3536,10 +3611,15 @@ spec: credentials. If not specified, default values will be attempted. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic diff --git a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml index 35a599ab7..77bcab8a3 100644 --- a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -1284,10 +1284,15 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the @@ -1354,10 +1359,15 @@ spec: secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the @@ -1391,10 +1401,15 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap @@ -1411,10 +1426,15 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret @@ -2821,10 +2841,15 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the @@ -2891,10 +2916,15 @@ spec: secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the @@ -2928,10 +2958,15 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap @@ -2948,10 +2983,15 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret @@ -4262,10 +4302,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string required: - name @@ -4358,10 +4403,15 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the @@ -4428,10 +4478,15 @@ spec: secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
type: string optional: description: Specify whether the @@ -4465,10 +4520,15 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap @@ -4485,10 +4545,15 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret @@ -6590,10 +6655,15 @@ spec: More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -6629,10 +6699,15 @@ spec: to OpenStack. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -6699,10 +6774,15 @@ spec: type: array x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional specify whether the @@ -6735,10 +6815,15 @@ spec: secret object contains more than one secret, all secret references are passed. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -7248,10 +7333,15 @@ spec: scripts. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -7447,10 +7537,15 @@ spec: for iSCSI target and initiator authentication properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -7722,10 +7817,15 @@ spec: type: array x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional specify @@ -7873,10 +7973,15 @@ spec: type: array x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional field specify @@ -8009,10 +8114,15 @@ spec: More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -8058,10 +8168,15 @@ spec: sensitive information. If this is not provided, Login operation will fail. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -8181,10 +8296,15 @@ spec: credentials. If not specified, default values will be attempted. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic diff --git a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml index 0de8d3141..6951631a1 100644 --- a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -125,10 +125,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -441,10 +446,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic diff --git a/cluster/crds/pkg.crossplane.io_functions.yaml b/cluster/crds/pkg.crossplane.io_functions.yaml index e6183b3e9..76ed888fc 100644 --- a/cluster/crds/pkg.crossplane.io_functions.yaml +++ b/cluster/crds/pkg.crossplane.io_functions.yaml @@ -108,10 +108,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -324,10 +329,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic diff --git a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml index ecafb8762..d1abe2dc1 100644 --- a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml @@ -125,10 +125,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic diff --git a/cluster/crds/pkg.crossplane.io_providers.yaml b/cluster/crds/pkg.crossplane.io_providers.yaml index 677fbf9c5..79b33ab12 100644 --- a/cluster/crds/pkg.crossplane.io_providers.yaml +++ b/cluster/crds/pkg.crossplane.io_providers.yaml @@ -110,10 +110,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic diff --git a/go.sum b/go.sum index 7b35b728f..e8b05366b 100644 --- a/go.sum +++ b/go.sum @@ -636,28 +636,20 @@ gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= -k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs= k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y= -k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= -k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= k8s.io/apiserver v0.30.0 h1:QCec+U72tMQ+9tR6A0sMBB5Vh6ImCEkoKkTDRABWq6M= k8s.io/apiserver v0.30.0/go.mod h1:smOIBq8t0MbKZi7O7SyIpjPsiKJ8qa+llcFCluKyqiY= k8s.io/cli-runtime v0.29.1 h1:By3WVOlEWYfyxhGko0f/IuAOLQcbBSMzwSaDren2JUs= k8s.io/cli-runtime v0.29.1/go.mod h1:vjEY9slFp8j8UoMhV5AlO8uulX9xk6ogfIesHobyBDU= -k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= -k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/code-generator v0.30.0 h1:3VUVqHvWFSVSm9kqL/G6kD4ZwNdHF6J/jPyo3Jgjy3k= k8s.io/code-generator v0.30.0/go.mod h1:mBMZhfRR4IunJUh2+7LVmdcWwpouCH5+LNPkZ3t/v7Q= -k8s.io/component-base v0.30.0 h1:cj6bp38g0ainlfYtaOQuRELh5KSYjhKxM+io7AUIk4o= -k8s.io/component-base 
v0.30.0/go.mod h1:V9x/0ePFNaKeKYA3bOvIbrNoluTSG+fSJKjLdjOoeXQ= k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= @@ -678,8 +670,6 @@ sigs.k8s.io/controller-runtime v0.18.2 h1:RqVW6Kpeaji67CY5nPEfRz6ZfFMk0lWQlNrLql sigs.k8s.io/controller-runtime v0.18.2/go.mod h1:tuAt1+wbVsXIT8lPtk5RURxqAnq7xkpv2Mhttslg7Hw= sigs.k8s.io/controller-tools v0.14.0 h1:rnNoCC5wSXlrNoBKKzL70LNJKIQKEzT6lloG6/LF73A= sigs.k8s.io/controller-tools v0.14.0/go.mod h1:TV7uOtNNnnR72SpzhStvPkoS/U5ir0nMudrkrC4M9Sc= -sigs.k8s.io/e2e-framework v0.3.0 h1:eqQALBtPCth8+ulTs6lcPK7ytV5rZSSHJzQHZph4O7U= -sigs.k8s.io/e2e-framework v0.3.0/go.mod h1:C+ef37/D90Dc7Xq1jQnNbJYscrUGpxrWog9bx2KIa+c= sigs.k8s.io/e2e-framework v0.4.0 h1:4yYmFDNNoTnazqmZJXQ6dlQF1vrnDbutmxlyvBpC5rY= sigs.k8s.io/e2e-framework v0.4.0/go.mod h1:JilFQPF1OL1728ABhMlf9huse7h+uBJDXl9YeTs49A8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= From 0efa870516bfa32db61df30d301041a5e6429fe5 Mon Sep 17 00:00:00 2001 From: "crossplane-renovate[bot]" <166709878+crossplane-renovate[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 08:09:59 +0000 Subject: [PATCH 363/370] chore(deps): update actions/checkout digest to 692973e --- .github/workflows/backport.yml | 2 +- .github/workflows/ci.yml | 16 ++++++++-------- .github/workflows/commands.yml | 2 +- .github/workflows/promote.yml | 2 +- .github/workflows/scan.yaml | 2 +- .github/workflows/tag.yml | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index ec43b1681..b2b579161 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -22,7 +22,7 @@ jobs: if: github.event.pull_request.merged steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 with: fetch-depth: 0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e872dbc4b..3f3b0d369 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,7 +29,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Setup Earthly uses: earthly/actions-setup@v1 @@ -75,7 +75,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Setup Earthly uses: earthly/actions-setup@v1 @@ -111,7 +111,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Setup Earthly uses: earthly/actions-setup@v1 @@ -152,7 +152,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Run Trivy vulnerability scanner in fs mode uses: aquasecurity/trivy-action@fd25fed6972e341ff0007ddb61f77e88103953c2 # 0.21.0 @@ -175,7 +175,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: 
actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Setup Earthly uses: earthly/actions-setup@v1 @@ -227,7 +227,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Setup Earthly uses: earthly/actions-setup@v1 @@ -289,7 +289,7 @@ jobs: docker-images: false - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 with: fetch-depth: 0 @@ -389,7 +389,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Setup Buf uses: bufbuild/buf-setup-action@v1 diff --git a/.github/workflows/commands.yml b/.github/workflows/commands.yml index b7182a88b..bbb7bbf91 100644 --- a/.github/workflows/commands.yml +++ b/.github/workflows/commands.yml @@ -21,7 +21,7 @@ jobs: permission-level: write - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 with: fetch-depth: 0 diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index ce69e8393..88db21398 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -35,7 +35,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Setup Earthly uses: earthly/actions-setup@v1 diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index 745f681b8..53237354d 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -17,7 +17,7 @@ jobs: supported_releases: ${{ steps.get-releases.outputs.supported_releases }} steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 with: fetch-depth: 0 diff --git a/.github/workflows/tag.yml b/.github/workflows/tag.yml index 22994508d..9509a3e2a 100644 --- a/.github/workflows/tag.yml +++ b/.github/workflows/tag.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Create Tag uses: negz/create-tag@39bae1e0932567a58c20dea5a1a0d18358503320 # v1 From 74b63c88926613253f4795c3e86be158e0449457 Mon Sep 17 00:00:00 2001 From: Jean du Plessis Date: Thu, 5 Sep 2024 18:22:38 +0200 Subject: [PATCH 364/370] Remove unnecessary function comment. Signed-off-by: Jean du Plessis --- test/e2e/usage_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/e2e/usage_test.go b/test/e2e/usage_test.go index ec69c7940..2f7826708 100644 --- a/test/e2e/usage_test.go +++ b/test/e2e/usage_test.go @@ -116,8 +116,6 @@ func TestUsageStandalone(t *testing.T) { ) } -// TestUsageComposition tests scenarios for Crossplane's `Usage` resource as part -// of a composition. func TestUsageComposition(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/usage/composition" From c0afe70bace1ce39efc2b74ba604fe36ec37bf1b Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 5 Sep 2024 16:37:59 -0700 Subject: [PATCH 365/370] Make composition revision numbers mutable They were made immutable by mistake. 
Signed-off-by: Nic Cope --- .../v1/composition_revision_types.go | 6 ++++- ...zz_generated.composition_revision_types.go | 6 ++++- ...ns.crossplane.io_compositionrevisions.yaml | 24 ++++++++++++------- 3 files changed, 26 insertions(+), 10 deletions(-) diff --git a/apis/apiextensions/v1/composition_revision_types.go b/apis/apiextensions/v1/composition_revision_types.go index fc8c07886..301262e51 100644 --- a/apis/apiextensions/v1/composition_revision_types.go +++ b/apis/apiextensions/v1/composition_revision_types.go @@ -126,7 +126,11 @@ type CompositionRevisionSpec struct { PublishConnectionDetailsWithStoreConfigRef *StoreConfigReference `json:"publishConnectionDetailsWithStoreConfigRef,omitempty"` // Revision number. Newer revisions have larger numbers. - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // + // This number can change. When a Composition transitions from state A + // -> B -> A there will be only two CompositionRevisions. Crossplane will + // edit the original CompositionRevision to change its revision number from + // 0 to 2. Revision int64 `json:"revision"` } diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go index 680575832..f110dccb7 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go @@ -128,7 +128,11 @@ type CompositionRevisionSpec struct { PublishConnectionDetailsWithStoreConfigRef *StoreConfigReference `json:"publishConnectionDetailsWithStoreConfigRef,omitempty"` // Revision number. Newer revisions have larger numbers. - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // + // This number can change. When a Composition transitions from state A + // -> B -> A there will be only two CompositionRevisions. Crossplane will + // edit the original CompositionRevision to change its revision number from + // 0 to 2. Revision int64 `json:"revision"` } diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index 22c60210f..682c12578 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -1587,12 +1587,16 @@ spec: type: object type: array revision: - description: Revision number. Newer revisions have larger numbers. + description: |- + Revision number. Newer revisions have larger numbers. + + + This number can change. When a Composition transitions from state A + -> B -> A there will be only two CompositionRevisions. Crossplane will + edit the original CompositionRevision to change its revision number from + 0 to 2. format: int64 type: integer - x-kubernetes-validations: - - message: Value is immutable - rule: self == oldSelf writeConnectionSecretsToNamespace: description: |- WriteConnectionSecretsToNamespace specifies the namespace in which the @@ -3234,12 +3238,16 @@ spec: type: object type: array revision: - description: Revision number. Newer revisions have larger numbers. + description: |- + Revision number. Newer revisions have larger numbers. + + + This number can change. When a Composition transitions from state A + -> B -> A there will be only two CompositionRevisions. Crossplane will + edit the original CompositionRevision to change its revision number from + 0 to 2. 
format: int64 type: integer - x-kubernetes-validations: - - message: Value is immutable - rule: self == oldSelf writeConnectionSecretsToNamespace: description: |- WriteConnectionSecretsToNamespace specifies the namespace in which the From 519e70726ebfabf30f54c9852af779d7095672e7 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Mon, 9 Sep 2024 15:02:02 +0000 Subject: [PATCH 366/370] Fix "Missing node in tree error" after updating a package source Delete packages in lock having same name and distinct identifier. Signed-off-by: Jose Francisco Dillet Alfonso --- .../controller/pkg/revision/dependency.go | 18 ++++++ .../pkg/revision/dependency_test.go | 60 +++++++++++++++++++ 2 files changed, 78 insertions(+) diff --git a/internal/controller/pkg/revision/dependency.go b/internal/controller/pkg/revision/dependency.go index 9da209088..69e381d87 100644 --- a/internal/controller/pkg/revision/dependency.go +++ b/internal/controller/pkg/revision/dependency.go @@ -137,6 +137,24 @@ func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Obje Dependencies: sources, } + // Delete packages in lock with same name and distinct source + // This is a corner case when source is updated but image SHA is not (i.e. relocate same image + // to another registry) + for _, lp := range lock.Packages { + if self.Name == lp.Name && + self.Type == lp.Type && + self.Source != lp.Identifier() { + if err := m.RemoveSelf(ctx, pr); err != nil { + return found, installed, invalid, err + } + // refresh the lock to be in sync with the contents + if err = m.client.Get(ctx, types.NamespacedName{Name: lockName}, lock); err != nil { + return found, installed, invalid, err + } + break + } + } + prExists := false for _, lp := range lock.Packages { if lp.Name == pr.GetName() { diff --git a/internal/controller/pkg/revision/dependency_test.go b/internal/controller/pkg/revision/dependency_test.go index 9b3dd0ee2..f8e79d009 100644 --- a/internal/controller/pkg/revision/dependency_test.go +++ b/internal/controller/pkg/revision/dependency_test.go @@ -42,6 +42,7 @@ var _ DependencyManager = &PackageDependencyManager{} func TestResolve(t *testing.T) { errBoom := errors.New("boom") + mockUpdateCallCount := 0 type args struct { dep *PackageDependencyManager @@ -553,9 +554,68 @@ func TestResolve(t *testing.T) { invalid: 0, }, }, + "SuccessfulLockPackageSourceMismatch": { + reason: "Should not return error if source in packages does not match provider revision package.", + args: args{ + dep: &PackageDependencyManager{ + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + l := obj.(*v1beta1.Lock) + if mockUpdateCallCount < 1 { + l.Packages = []v1beta1.LockPackage{ + { + Name: "config-nop-a-abc123", + // Source mistmatch provider revision package + Source: "hasheddan/config-nop-b", + }, + } + } else { + l.Packages = []v1beta1.LockPackage{} + } + return nil + }), + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { + mockUpdateCallCount++ + return nil + }, + }, + newDag: func() dag.DAG { + return &dagfake.MockDag{ + MockInit: func(_ []dag.Node) ([]dag.Node, error) { + return []dag.Node{}, nil + }, + MockTraceNode: func(s string) (map[string]dag.Node, error) { + if s == "hasheddan/config-nop-a" { + return map[string]dag.Node{ + s: &v1beta1.Dependency{}, + }, nil + } + return nil, errors.New("missing node in tree") + }, + MockAddOrUpdateNodes: func(_ ...dag.Node) {}, + } + }, + }, + meta: &pkgmetav1.Configuration{}, + pr: &v1.ConfigurationRevision{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "config-nop-a-abc123", + }, + Spec: v1.PackageRevisionSpec{ + Package: "hasheddan/config-nop-a:v0.0.1", + DesiredState: v1.PackageRevisionActive, + }, + }, + }, + want: want{ + total: 1, + installed: 1, + }, + }, } for name, tc := range cases { + mockUpdateCallCount = 0 t.Run(name, func(t *testing.T) { total, installed, invalid, err := tc.args.dep.Resolve(context.TODO(), tc.args.meta, tc.args.pr) From 991ac5fbd0c0b7edd56c2b1ac9912867dff022f6 Mon Sep 17 00:00:00 2001 From: Jose Francisco Dillet Alfonso Date: Tue, 10 Sep 2024 09:00:09 +0000 Subject: [PATCH 367/370] Do not wrap line Signed-off-by: Jose Francisco Dillet Alfonso --- internal/controller/pkg/revision/dependency.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/internal/controller/pkg/revision/dependency.go b/internal/controller/pkg/revision/dependency.go index 69e381d87..af7b76f0b 100644 --- a/internal/controller/pkg/revision/dependency.go +++ b/internal/controller/pkg/revision/dependency.go @@ -141,9 +141,7 @@ func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Obje // This is a corner case when source is updated but image SHA is not (i.e. relocate same image // to another registry) for _, lp := range lock.Packages { - if self.Name == lp.Name && - self.Type == lp.Type && - self.Source != lp.Identifier() { + if self.Name == lp.Name && self.Type == lp.Type && self.Source != lp.Identifier() { if err := m.RemoveSelf(ctx, pr); err != nil { return found, installed, invalid, err } From fe17a8ddf498a9bb94a91e335ff5c649690b695f Mon Sep 17 00:00:00 2001 From: Christopher Haar Date: Tue, 10 Sep 2024 14:49:32 +0200 Subject: [PATCH 368/370] fix(ga): remove beta for CompositionModePipeline Signed-off-by: Christopher Haar --- apis/apiextensions/v1/composition_common.go | 3 --- apis/apiextensions/v1beta1/zz_generated.composition_common.go | 3 --- 2 files changed, 6 deletions(-) diff --git a/apis/apiextensions/v1/composition_common.go b/apis/apiextensions/v1/composition_common.go index 4819187e8..83b5bed70 100644 --- a/apis/apiextensions/v1/composition_common.go +++ b/apis/apiextensions/v1/composition_common.go @@ -48,9 +48,6 @@ const ( // CompositionModePipeline indicates that a Composition specifies a pipeline // of Composition Functions, each of which is responsible for producing // composed resources that Crossplane should create or update. - // - // THIS IS A BETA FEATURE. It is not honored if the relevant Crossplane - // feature flag is disabled. CompositionModePipeline CompositionMode = "Pipeline" ) diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_common.go b/apis/apiextensions/v1beta1/zz_generated.composition_common.go index 9fb73ad3f..ba1e7dfdb 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_common.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_common.go @@ -50,9 +50,6 @@ const ( // CompositionModePipeline indicates that a Composition specifies a pipeline // of Composition Functions, each of which is responsible for producing // composed resources that Crossplane should create or update. - // - // THIS IS A BETA FEATURE. It is not honored if the relevant Crossplane - // feature flag is disabled. 
CompositionModePipeline CompositionMode = "Pipeline" ) From 10f8f2daf7b91a2684df97a734dc6bce1795f90e Mon Sep 17 00:00:00 2001 From: Eric Deitrick <149844727+DE-Wizard@users.noreply.github.com> Date: Thu, 12 Sep 2024 11:37:02 -0400 Subject: [PATCH 369/370] Update ADOPTERS.md Signed-off-by: Eric Deitrick <149844727+DE-Wizard@users.noreply.github.com> --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 4a764ac6a..1f996efd3 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -91,3 +91,4 @@ This list is sorted in the order that organizations were added to it. | [Sopra Steria NO](https://www.soprasteria.no/) | [Eirik Holgernes](mailto:eirik.holgernes@soprasteria.com) | As a consultant agency, [Sopra Steria NO](https://www.soprasteria.no/) is leveraging the benefits of [Crossplane](https://www.crossplane.io/) to create self-service backends to increase speed and agility for the developers and engineers of our customers.
With the power of the compositions and composite resource definitions, the life cycle management of resources in [Kubernetes](https://kubernetes.io/) and deployment using GitOps tools like [Flux](https://fluxcd.io/) or [Argo CD](https://argoproj.github.io/cd/), our customers are taking giant strides into the future! | | [Zuru Tech Italy](https://zuru.tech/) | [@nello1992](https://github.com/nello1992) | We currently use Crossplane in production environments to deploy workload clusters, with more use cases across the organization to come. | | [Rogo](https://rogodata.com/) | [@aiell0](https://github.com/aiell0) | We use Crossplane to deploy application-specific infrastructure to multiple cloud providers in our production environments. | +| [Arcfield](https://arcfield.com/) | [@DE-Wizard](https://github.com/DE-Wizard) | Our entire cloud architecture was redesigned from the ground up using [Crossplane](https://www.crossplane.io/) to manage the cloud resources and [Flux](https://fluxcd.io/) to manage feeding [Crossplane](https://www.crossplane.io/) with its configurations. We have architected a Control - Workload cluster configuration that spans multiple regions and providers. The combination of the 2 controllers allowed us to more tightly control environment changes and apply drift correction to mitigate manual configuration changes that may be unauthorized. | From 56cb2038610983db8c83a949fbb7afb3ed647fa5 Mon Sep 17 00:00:00 2001 From: Eric Deitrick <149844727+DE-Wizard@users.noreply.github.com> Date: Thu, 12 Sep 2024 13:05:26 -0400 Subject: [PATCH 370/370] Update ADOPTERS.md Signed-off-by: Eric Deitrick <149844727+DE-Wizard@users.noreply.github.com> --- ADOPTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index 1f996efd3..928d23fdb 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -91,4 +91,4 @@ This list is sorted in the order that organizations were added to it. | [Sopra Steria NO](https://www.soprasteria.no/) | [Eirik Holgernes](mailto:eirik.holgernes@soprasteria.com) | As a consultant agency, [Sopra Steria NO](https://www.soprasteria.no/) is leveraging the benefits of [Crossplane](https://www.crossplane.io/) to create self-service backends to increase speed and agility for the developers and engineers of our customers.
With the power of the compositions and composite resource definitions, the life cycle management of resources in [Kubernetes](https://kubernetes.io/) and deployment using GitOps tools like [Flux](https://fluxcd.io/) or [Argo CD](https://argoproj.github.io/cd/), our customers are taking giant strides into the future! | | [Zuru Tech Italy](https://zuru.tech/) | [@nello1992](https://github.com/nello1992) | We currently use Crossplane in production environments to deploy workload clusters, with more use cases across the organization to come. | | [Rogo](https://rogodata.com/) | [@aiell0](https://github.com/aiell0) | We use Crossplane to deploy application-specific infrastructure to multiple cloud providers in our production environments. | -| [Arcfield](https://arcfield.com/) | [@DE-Wizard](https://github.com/DE-Wizard) | Our entire cloud architecture was redesigned from the ground up using [Crossplane](https://www.crossplane.io/) to manage the cloud resources and [Flux](https://fluxcd.io/) to manage feeding [Crossplane](https://www.crossplane.io/) with its configurations. We have architected a Control - Workload cluster configuration that spans multiple regions and providers. The combination of the 2 controllers allowed us to more tightly control environment changes and apply drift correction to mitigate manual configuration changes that may be unauthorized. | +| [Arcfield](https://arcfield.com/) | [@DE-Wizard](https://github.com/DE-Wizard) | Our entire cloud architecture was redesigned from the ground up using [Crossplane](https://www.crossplane.io/) to manage the cloud resources and [Flux](https://fluxcd.io/) to manage feeding [Crossplane](https://www.crossplane.io/) with its configurations. We have architected a Control - Workload cluster configuration that spans multiple regions and providers. The combination of the 2 controllers allowed us to more tightly control environment changes and apply drift correction to mitigate manual configuration changes that may be unauthorized. Our combination covers both dev and production environments with the production environment Master Control Cluster having dominion over both in the end. |