diff --git a/.github/ISSUE_TEMPLATE/patch_release.md b/.github/ISSUE_TEMPLATE/patch_release.md index 3bc5f0552..ccc19617b 100644 --- a/.github/ISSUE_TEMPLATE/patch_release.md +++ b/.github/ISSUE_TEMPLATE/patch_release.md @@ -29,7 +29,7 @@ examples of each step, assuming vX.Y.Z is being cut. - [ ] `xp/getting-started-with-azure` - [ ] `xp/getting-started-with-gcp` - [ ] Run the [Promote workflow][promote-workflow] with channel `stable` on the `release-X.Y` branch and verified that the tagged build version exists on the [releases.crossplane.io] `stable` channel at `stable/vX.Y.Z/...`. -- [ ] Published a [new release] for the tagged version, with the same name as the version and descriptive release notes, taking care of generating the changes list selecting as "Previous tag" `vX.Y.`, so the previous patch release for the same minor. +- [ ] Published a [new release] for the tagged version, with the same name as the version and descriptive release notes, taking care of generating the changes list selecting as "Previous tag" `vX.Y.`, so the previous patch release for the same minor. Before publishing the release notes, set them as Draft and ask the rest of the team to double check them. - [ ] Ensured that users have been notified of the release on all communication channels: - [ ] Slack: `#announcements` channel on Crossplane's Slack workspace. - [ ] Twitter: reach out to a Crossplane maintainer or steering committee member, see [OWNERS.md][owners]. diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md index 679118093..5f786ce0e 100644 --- a/.github/ISSUE_TEMPLATE/release.md +++ b/.github/ISSUE_TEMPLATE/release.md @@ -22,7 +22,7 @@ this issue for posterity. Refer to this [prior release issue][release-1.11.0] fo examples of each step, assuming release vX.Y.0 is being cut. - [ ] Prepared the release branch `release-X.Y` at the beginning of [Code Freeze]: - - [ ] Created the release branch. + - [ ] Created the release branch using the [GitHub UI][create-branch]. - [ ] Created and merged an empty commit to the `master` branch, if required to have it at least one commit ahead of the release branch. - [ ] Run the [Tag workflow][tag-workflow] on the `master` branch with the release candidate tag for the next release `vX.Y+1.0-rc.0`. - [ ] Opened a [docs release issue]. @@ -34,7 +34,7 @@ examples of each step, assuming release vX.Y.0 is being cut. - [ ] `xp/getting-started-with-azure` - [ ] `xp/getting-started-with-gcp` - [ ] Run the [Promote workflow][promote-workflow] with channel `stable` on the `release-X.Y` branch and verified that the tagged build version exists on the [releases.crossplane.io] `stable` channel at `stable/vX.Y.0/...`. -- [ ] Published a [new release] for the tagged version, with the same name as the version and descriptive release notes, taking care of generating the changes list selecting as "Previous tag" `vX..0`, so the first of the releases for the previous minor. +- [ ] Published a [new release] for the tagged version, with the same name as the version and descriptive release notes, taking care of generating the changes list selecting as "Previous tag" `vX..0`, so the first of the releases for the previous minor. Before publishing the release notes, set them as Draft and ask the rest of the team to double check them. - [ ] Checked that the [docs release issue] created previously has been completed. 
- [ ] Updated, in a single PR, the following on `master`: - [ ] The [releases table] in the `README.md`, removing the now old unsupported release and adding the new one. @@ -49,6 +49,7 @@ examples of each step, assuming release vX.Y.0 is being cut. [Code Freeze]: https://docs.crossplane.io/knowledge-base/guides/release-cycle/#code-freeze [ci-workflow]: https://github.com/crossplane/crossplane/actions/workflows/ci.yml [configurations-workflow]: https://github.com/crossplane/crossplane/actions/workflows/configurations.yml +[create-branch]: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-and-deleting-branches-within-your-repository [docs release issue]: https://github.com/crossplane/docs/issues/new?assignees=&labels=release&template=new_release.md&title=Release+Crossplane+version...+ [new release]: https://github.com/crossplane/crossplane/releases/new [owners]: https://github.com/crossplane/crossplane/blob/master/OWNERS.md diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 67bce092b..48d3bb8fa 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -27,5 +27,6 @@ I have: - [ ] Added or updated unit **and** E2E tests for my change. - [ ] Run `make reviewable` to ensure this PR is ready for review. - [ ] Added `backport release-x.y` labels to auto-backport this PR if necessary. +- [ ] Opened a PR updating the [docs](https://docs.crossplane.io/contribute/contribute/), if necessary. -[contribution process]: https://git.io/fj2m9 \ No newline at end of file +[contribution process]: https://git.io/fj2m9 diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 502c999a7..dcef14d35 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -2,7 +2,8 @@ "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ "config:base", - "helpers:pinGitHubActionDigests" + "helpers:pinGitHubActionDigests", + ":semanticCommits" ], // We only want renovate to rebase PRs when they have conflicts, // default "auto" mode is not required. @@ -11,12 +12,9 @@ "prConcurrentLimit": 5, // The branches renovate should target // PLEASE UPDATE THIS WHEN RELEASING. 
- "baseBranches": ["master","release-1.10","release-1.11","release-1.12"], + "baseBranches": ["master","release-1.11","release-1.12","release-1.13"], "ignorePaths": ["design/**"], "postUpdateOptions": ["gomodTidy"], -// By default renovate will auto detect whether semantic commits have been used -// in the recent history and comply with that, we explicitly disable it - "semanticCommits": "disabled", // All PRs should have a label "labels": ["automated"], "regexManagers": [ diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 802daa49f..4091a83e1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,7 +10,7 @@ on: env: # Common versions - GO_VERSION: '1.20.5' + GO_VERSION: '1.20.7' GOLANGCI_VERSION: 'v1.53.3' DOCKER_BUILDX_VERSION: 'v0.10.0' @@ -136,7 +136,7 @@ jobs: - name: Find the Go Build Cache id: go - run: echo "::set-output name=cache::$(make go.cachedir)" + run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - name: Cache the Go Build Cache uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3 @@ -156,12 +156,12 @@ jobs: run: make vendor vendor.check - name: Initialize CodeQL - uses: github/codeql-action/init@f6e388ebf0efc915c6c5b165b019ee61a6746a38 # v2 + uses: github/codeql-action/init@0ba4244466797eb048eb91a6cd43d5c03ca8bd05 # v2 with: languages: go - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@f6e388ebf0efc915c6c5b165b019ee61a6746a38 # v2 + uses: github/codeql-action/analyze@0ba4244466797eb048eb91a6cd43d5c03ca8bd05 # v2 trivy-scan-fs: runs-on: ubuntu-22.04 @@ -204,7 +204,7 @@ jobs: - name: Find the Go Build Cache id: go - run: echo "::set-output name=cache::$(make go.cachedir)" + run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - name: Cache the Go Build Cache uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3 @@ -238,7 +238,7 @@ jobs: if: needs.detect-noop.outputs.noop != 'true' strategy: matrix: - area: [lifecycle, pkg, apiextensions] + area: [lifecycle, pkg, apiextensions, xfn] steps: - name: Setup QEMU @@ -247,7 +247,7 @@ jobs: platforms: all - name: Setup Docker Buildx - uses: docker/setup-buildx-action@ecf95283f03858871ff00b787d79c419715afc34 # v2 + uses: docker/setup-buildx-action@4c0219f9ac95b02789c1075625400b2acbff50b1 # v2 with: version: ${{ env.DOCKER_BUILDX_VERSION }} install: true @@ -267,7 +267,7 @@ jobs: - name: Find the Go Build Cache id: go - run: echo "::set-output name=cache::$(make go.cachedir)" + run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - name: Cache the Go Build Cache uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3 @@ -303,13 +303,23 @@ jobs: if: needs.detect-noop.outputs.noop != 'true' steps: + - name: Cleanup Disk + uses: jlumbroso/free-disk-space@main + with: + android: true + dotnet: true + haskell: true + tool-cache: true + large-packages: false + swap-storage: false + - name: Setup QEMU uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2 with: platforms: all - name: Setup Docker Buildx - uses: docker/setup-buildx-action@ecf95283f03858871ff00b787d79c419715afc34 # v2 + uses: docker/setup-buildx-action@4c0219f9ac95b02789c1075625400b2acbff50b1 # v2 with: version: ${{ env.DOCKER_BUILDX_VERSION }} install: true @@ -329,7 +339,7 @@ jobs: - name: Find the Go Build Cache id: go - run: echo "::set-output name=cache::$(make go.cachedir)" + run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - name: Cache the Go Build Cache uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3 diff --git 
a/.github/workflows/promote.yml b/.github/workflows/promote.yml index bf75644a0..2eb8fd553 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -13,7 +13,7 @@ on: env: # Common versions - GO_VERSION: '1.20.5' + GO_VERSION: '1.20.7' # Common users. We can't run a step 'if secrets.AWS_USR != ""' but we can run # a step 'if env.AWS_USR' != ""', so we copy these to succinctly test whether diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index c7a63611e..e1f8d0ede 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -131,7 +131,7 @@ jobs: retention-days: 3 - name: Upload Trivy Scan Results To GitHub Security Tab - uses: github/codeql-action/upload-sarif@f6e388ebf0efc915c6c5b165b019ee61a6746a38 # v2 + uses: github/codeql-action/upload-sarif@0ba4244466797eb048eb91a6cd43d5c03ca8bd05 # v2 with: sarif_file: 'trivy-results.sarif' category: ${{ matrix.image }}:${{ env.tag }} diff --git a/ADOPTERS.md b/ADOPTERS.md index 89a475e29..1fb265c87 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -51,3 +51,5 @@ This list is sorted in the order that organizations were added to it. | [PITS Global Data Recovery Services](https://www.pitsdatarecovery.net/) | @pheianox | Declarative configuration and integration with CI/CD pipelines | | [NASA Science Cloud](https://smce.nasa.gov/) | [ramon.e.ramirez-linan@nasa.gov](mailto:ramon.e.ramirez-linan@nasa.gov) ([@rezuma](https://github.com/rezuma)) | [NASA Science Cloud](https://smce.nasa.gov) has created compositions to deploy the Open Science Studio, a jupyterhub based platform that connects to HPC in the cloud and foster NASA Open Science Initiative. Navteca ([@navteca](https://github.com/Navteca)) has been helping NASA with this initiative | | [Navteca](https://navteca.com/) | [rlinan@navteca.com](mailto:rlinan@navteca.com) ([@navteca](https://github.com/Navteca)) | [Navteca](https://www.navteca.com) is adopting Crossplane to deploy [Voice Atlas](https://www.voiceatlas.com) a cloud based product that let customer connect corporate knowledge with any Large Language Model and offered to be consumed by users through any channel (slack, MS Teams, Website, etc) | +| [SAP](https://sap.com/) | [d.small@sap.com](mailto:d.small@sap.com)| [SAP](https://sap.com) uses Crossplane as part of a solution that gives teams owning micro-services the ability to provision hyper-scaler hosted backing services such as Redis on demand. | +| [Airnity](https://airnity.com/) | [hello@airnity.com](mailto:hello@airnity.com) | [Airnity](https://airnity.com/) uses Crossplane to deploy a worldwide cellular connectivity platform for the automotive industry. 
| diff --git a/Makefile b/Makefile index fe87712cf..ec5920d70 100644 --- a/Makefile +++ b/Makefile @@ -40,7 +40,7 @@ GOLANGCILINT_VERSION = 1.53.3 # Setup Kubernetes tools USE_HELM3 = true -HELM3_VERSION = v3.12.1 +HELM3_VERSION = v3.12.2 KIND_VERSION = v0.20.0 -include build/makelib/k8s_tools.mk @@ -104,10 +104,20 @@ cobertura: grep -v zz_generated.deepcopy | \ $(GOCOVER_COBERTURA) > $(GO_TEST_OUTPUT)/cobertura-coverage.xml -e2e-tag-images: +# TODO(pedjak): +# https://github.com/crossplane/crossplane/issues/4294 +e2e.test.images: + @$(INFO) Building E2E test images + @docker build --load -t $(BUILD_REGISTRY)/fn-labelizer-$(TARGETARCH) test/e2e/testdata/images/labelizer + @docker build --load -t $(BUILD_REGISTRY)/fn-tmp-writer-$(TARGETARCH) test/e2e/testdata/images/tmp-writer + @$(OK) Built E2E test images + +e2e-tag-images: e2e.test.images @$(INFO) Tagging E2E test images @docker tag $(BUILD_REGISTRY)/$(PROJECT_NAME)-$(TARGETARCH) crossplane-e2e/$(PROJECT_NAME):latest || $(FAIL) @docker tag $(BUILD_REGISTRY)/xfn-$(TARGETARCH) crossplane-e2e/xfn:latest || $(FAIL) + @docker tag $(BUILD_REGISTRY)/fn-labelizer-$(TARGETARCH) crossplane-e2e/fn-labelizer:latest || $(FAIL) + @docker tag $(BUILD_REGISTRY)/fn-tmp-writer-$(TARGETARCH) crossplane-e2e/fn-tmp-writer:latest || $(FAIL) @$(OK) Tagged E2E test images # NOTE(negz): There's already a go.test.integration target, but it's weird. @@ -118,7 +128,7 @@ E2E_TEST_FLAGS ?= # https://github.com/kubernetes-sigs/e2e-framework/issues/282 E2E_PATH = $(WORK_DIR)/e2e -e2e-run-tests: $(KIND) $(HELM3) +e2e-run-tests: @$(INFO) Run E2E tests @mkdir -p $(E2E_PATH) @ln -sf $(KIND) $(E2E_PATH)/kind @@ -128,7 +138,7 @@ e2e-run-tests: $(KIND) $(HELM3) e2e.init: build e2e-tag-images -e2e.run: e2e-run-tests +e2e.run: $(KIND) $(HELM3) e2e-run-tests # Update the submodules, such as the common build scripts. submodules: @@ -160,7 +170,7 @@ run: go.build @# To see other arguments that can be provided, run the command with --help instead $(GO_OUT_DIR)/$(PROJECT_NAME) core start --debug -.PHONY: manifests cobertura submodules fallthrough test-integration run install-crds uninstall-crds gen-kustomize-crds e2e-tests-compile +.PHONY: manifests cobertura submodules fallthrough test-integration run install-crds uninstall-crds gen-kustomize-crds e2e-tests-compile e2e.test.images # ==================================================================================== # Special Targets diff --git a/OWNERS.md b/OWNERS.md index 9fbd336f3..352ea9b95 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -26,10 +26,10 @@ See [CODEOWNERS](CODEOWNERS) for automatic PR assignment. ## Maintainers * Nic Cope ([negz](https://github.com/negz)) -* Daniel Mangum ([hasheddan](https://github.com/hasheddan)) * Muvaffak Onus ([muvaf](https://github.com/muvaf)) * Hasan Turken ([turkenh](https://github.com/turkenh)) * Bob Haddleton ([bobh66](https://github.com/bobh66)) +* Philippe Scorsolini ([phisco](https://github.com/phisco)) ## Reviewers @@ -37,12 +37,13 @@ See [CODEOWNERS](CODEOWNERS) for automatic PR assignment. 
* Daren Iott ([nullable-eth](https://github.com/nullable-eth)) * Ezgi Demirel ([ezgidemirel](https://github.com/ezgidemirel)) * Max Blatt ([MisterMX](https://github.com/MisterMX)) -* Philippe Scorsolini ([phisco](https://github.com/phisco)) * Jared Watts ([jbw976](https://github.com/jbw976)) * Lovro Sviben ([lsviben](https://github.com/lsviben)) +* Predrag Knezevic ([pedjak](https://github.com/pedjak)) ## Emeritus maintainers * Bassam Tabbara ([bassam](https://github.com/bassam)) * Jared Watts ([jbw976](https://github.com/jbw976)) * Illya Chekrygin ([ichekrygin](https://github.com/ichekrygin)) +* Daniel Mangum ([hasheddan](https://github.com/hasheddan)) diff --git a/SECURITY.md b/SECURITY.md index 471b30823..3265e9322 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -11,6 +11,9 @@ The following security related audits have been performed in the Crossplane project and are available for download from the [security folder](./security) and from the direct links below: +* A security audit was completed in July 2023 by [Ada + Logics](https://adalogics.com/). The full report is available + [here](./security/ADA-security-audit-23.pdf). * A fuzzing security audit was completed in March 2023 by [Ada Logics](https://adalogics.com/). The full report is available [here](./security/ADA-fuzzing-audit-22.pdf). diff --git a/apis/apiextensions/fn/proto/v1alpha1/run_function.pb.go b/apis/apiextensions/fn/proto/v1alpha1/run_function.pb.go index 82d0e7e1c..8929c75e5 100644 --- a/apis/apiextensions/fn/proto/v1alpha1/run_function.pb.go +++ b/apis/apiextensions/fn/proto/v1alpha1/run_function.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc (unknown) // source: apiextensions/fn/proto/v1alpha1/run_function.proto diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go new file mode 100644 index 000000000..59035f05e --- /dev/null +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go @@ -0,0 +1,815 @@ +// +//Copyright 2022 The Crossplane Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: apiextensions/fn/proto/v1beta1/run_function.proto + +package v1beta1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Severity of Function results. 
+type Severity int32 + +const ( + Severity_SEVERITY_UNSPECIFIED Severity = 0 + // Fatal results are fatal; subsequent Composition Functions may run, but + // the Composition Function pipeline run will be considered a failure and + // the first fatal result will be returned as an error. + Severity_SEVERITY_FATAL Severity = 1 + // Warning results are non-fatal; the entire Composition will run to + // completion but warning events and debug logs associated with the + // composite resource will be emitted. + Severity_SEVERITY_WARNING Severity = 2 + // Normal results are emitted as normal events and debug logs associated + // with the composite resource. + Severity_SEVERITY_NORMAL Severity = 3 +) + +// Enum value maps for Severity. +var ( + Severity_name = map[int32]string{ + 0: "SEVERITY_UNSPECIFIED", + 1: "SEVERITY_FATAL", + 2: "SEVERITY_WARNING", + 3: "SEVERITY_NORMAL", + } + Severity_value = map[string]int32{ + "SEVERITY_UNSPECIFIED": 0, + "SEVERITY_FATAL": 1, + "SEVERITY_WARNING": 2, + "SEVERITY_NORMAL": 3, + } +) + +func (x Severity) Enum() *Severity { + p := new(Severity) + *p = x + return p +} + +func (x Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Severity) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[0].Descriptor() +} + +func (Severity) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[0] +} + +func (x Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Severity.Descriptor instead. +func (Severity) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{0} +} + +// A RunFunctionRequest requests that the Composition Function be run. +type RunFunctionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Metadata pertaining to this request. + Meta *RequestMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + // The observed state prior to invocation of a Function pipeline. State passed + // to each Function is fresh as of the time the pipeline was invoked, not as + // of the time each Function was invoked. + Observed *State `protobuf:"bytes,2,opt,name=observed,proto3" json:"observed,omitempty"` + // Desired state according to a Function pipeline. The state passed to a + // particular Function may have been accumulated by processing a Composition's + // patch-and-transform resources array. It may also have been accumulated by + // previous Functions in the pipeline. + Desired *State `protobuf:"bytes,3,opt,name=desired,proto3" json:"desired,omitempty"` + // Optional input specific to this Function invocation. A JSON representation + // of the 'input' block of the relevant entry in a Composition's pipeline. 
+ Input *structpb.Struct `protobuf:"bytes,4,opt,name=input,proto3,oneof" json:"input,omitempty"` +} + +func (x *RunFunctionRequest) Reset() { + *x = RunFunctionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunFunctionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunFunctionRequest) ProtoMessage() {} + +func (x *RunFunctionRequest) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunFunctionRequest.ProtoReflect.Descriptor instead. +func (*RunFunctionRequest) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{0} +} + +func (x *RunFunctionRequest) GetMeta() *RequestMeta { + if x != nil { + return x.Meta + } + return nil +} + +func (x *RunFunctionRequest) GetObserved() *State { + if x != nil { + return x.Observed + } + return nil +} + +func (x *RunFunctionRequest) GetDesired() *State { + if x != nil { + return x.Desired + } + return nil +} + +func (x *RunFunctionRequest) GetInput() *structpb.Struct { + if x != nil { + return x.Input + } + return nil +} + +// A RunFunctionResponse contains the result of a Composition Function run. +type RunFunctionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Metadata pertaining to this response. + Meta *ResponseMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + // Desired state according to a Function pipeline. Functions may add desired + // state, and may mutate or delete any part of the desired state they are + // concerned with. A Function must pass through any part of the desired state + // that it is not concerned with. + Desired *State `protobuf:"bytes,2,opt,name=desired,proto3" json:"desired,omitempty"` + // Results of the Function run. Results are used for observability purposes. + Results []*Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *RunFunctionResponse) Reset() { + *x = RunFunctionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunFunctionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunFunctionResponse) ProtoMessage() {} + +func (x *RunFunctionResponse) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunFunctionResponse.ProtoReflect.Descriptor instead. 
+func (*RunFunctionResponse) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{1} +} + +func (x *RunFunctionResponse) GetMeta() *ResponseMeta { + if x != nil { + return x.Meta + } + return nil +} + +func (x *RunFunctionResponse) GetDesired() *State { + if x != nil { + return x.Desired + } + return nil +} + +func (x *RunFunctionResponse) GetResults() []*Result { + if x != nil { + return x.Results + } + return nil +} + +// RequestMeta contains metadata pertaining to a RunFunctionRequest. +type RequestMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string identifying the content of the request. Two identical + // requests should have the same tag. + Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` +} + +func (x *RequestMeta) Reset() { + *x = RequestMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestMeta) ProtoMessage() {} + +func (x *RequestMeta) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestMeta.ProtoReflect.Descriptor instead. +func (*RequestMeta) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{2} +} + +func (x *RequestMeta) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +// ResponseMeta contains metadata pertaining to a RunFunctionResponse. +type ResponseMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string identifying the content of the request. Must match the + // meta.tag of the corresponding RunFunctionRequest. + Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` + // Time-to-live of this response. Deterministic Functions with no side-effects + // (e.g. simple templating Functions) may specify a TTL. Crossplane may choose + // to cache responses until the TTL expires. + Ttl *durationpb.Duration `protobuf:"bytes,2,opt,name=ttl,proto3,oneof" json:"ttl,omitempty"` +} + +func (x *ResponseMeta) Reset() { + *x = ResponseMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseMeta) ProtoMessage() {} + +func (x *ResponseMeta) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseMeta.ProtoReflect.Descriptor instead. 
+func (*ResponseMeta) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{3} +} + +func (x *ResponseMeta) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *ResponseMeta) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +// State of the composite resource (XR) and any composed resources. +type State struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The state of the composite resource (XR). + Composite *Resource `protobuf:"bytes,1,opt,name=composite,proto3" json:"composite,omitempty"` + // The state of any composed resources. + Resources map[string]*Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *State) Reset() { + *x = State{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *State) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*State) ProtoMessage() {} + +func (x *State) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use State.ProtoReflect.Descriptor instead. +func (*State) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{4} +} + +func (x *State) GetComposite() *Resource { + if x != nil { + return x.Composite + } + return nil +} + +func (x *State) GetResources() map[string]*Resource { + if x != nil { + return x.Resources + } + return nil +} + +// A Resource represents the state of a resource. +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The JSON representation of the resource. + Resource *structpb.Struct `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // The resource's connection details. + ConnectionDetails map[string][]byte `protobuf:"bytes,2,rep,name=connection_details,json=connectionDetails,proto3" json:"connection_details,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. 
+func (*Resource) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{5} +} + +func (x *Resource) GetResource() *structpb.Struct { + if x != nil { + return x.Resource + } + return nil +} + +func (x *Resource) GetConnectionDetails() map[string][]byte { + if x != nil { + return x.ConnectionDetails + } + return nil +} + +// A Result of running a Function. +type Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Severity of this result. + Severity Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=apiextensions.fn.proto.v1beta1.Severity" json:"severity,omitempty"` + // Human-readable details about the result. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *Result) Reset() { + *x = Result{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Result) ProtoMessage() {} + +func (x *Result) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Result.ProtoReflect.Descriptor instead. +func (*Result) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{6} +} + +func (x *Result) GetSeverity() Severity { + if x != nil { + return x.Severity + } + return Severity_SEVERITY_UNSPECIFIED +} + +func (x *Result) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +var File_apiextensions_fn_proto_v1beta1_run_function_proto protoreflect.FileDescriptor + +var file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = []byte{ + 0x0a, 0x31, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x66, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2f, 0x72, 0x75, 0x6e, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x97, 0x02, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x65, 0x74, 0x61, 0x52, 0x04, 
0x6d, 0x65, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x08, 0x6f, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, + 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x07, + 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x32, 0x0a, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x88, 0x01, + 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0xda, 0x01, 0x0a, 0x13, + 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, + 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x64, + 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x1f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x22, 0x5a, 0x0a, 0x0c, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x30, 0x0a, 0x03, 0x74, + 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, + 0x04, 0x5f, 0x74, 0x74, 0x6c, 0x22, 0x8b, 0x02, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x46, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 
0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x61, 0x70, 0x69, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x66, 0x0a, 0x0e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x3e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0xf5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x33, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x6e, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3f, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x68, 0x0a, 0x06, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x44, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, + 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x2a, 0x63, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, + 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, + 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x14, 0x0a, 0x10, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x57, 0x41, 0x52, 0x4e, + 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, + 0x59, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x03, 0x32, 0x91, 0x01, 0x0a, 0x15, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x78, 0x0a, 0x0b, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x46, + 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6f, + 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescOnce sync.Once + file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData = file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc +) + +func file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP() []byte { + file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescOnce.Do(func() { + file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData = protoimpl.X.CompressGZIP(file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData) + }) + return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData +} + +var file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes = []interface{}{ + (Severity)(0), // 0: apiextensions.fn.proto.v1beta1.Severity + (*RunFunctionRequest)(nil), // 1: apiextensions.fn.proto.v1beta1.RunFunctionRequest + (*RunFunctionResponse)(nil), // 2: apiextensions.fn.proto.v1beta1.RunFunctionResponse + (*RequestMeta)(nil), // 3: apiextensions.fn.proto.v1beta1.RequestMeta + (*ResponseMeta)(nil), // 4: apiextensions.fn.proto.v1beta1.ResponseMeta + (*State)(nil), // 5: apiextensions.fn.proto.v1beta1.State + (*Resource)(nil), // 6: apiextensions.fn.proto.v1beta1.Resource + (*Result)(nil), // 7: 
apiextensions.fn.proto.v1beta1.Result + nil, // 8: apiextensions.fn.proto.v1beta1.State.ResourcesEntry + nil, // 9: apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + (*structpb.Struct)(nil), // 10: google.protobuf.Struct + (*durationpb.Duration)(nil), // 11: google.protobuf.Duration +} +var file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs = []int32{ + 3, // 0: apiextensions.fn.proto.v1beta1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1beta1.RequestMeta + 5, // 1: apiextensions.fn.proto.v1beta1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1beta1.State + 5, // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1beta1.State + 10, // 3: apiextensions.fn.proto.v1beta1.RunFunctionRequest.input:type_name -> google.protobuf.Struct + 4, // 4: apiextensions.fn.proto.v1beta1.RunFunctionResponse.meta:type_name -> apiextensions.fn.proto.v1beta1.ResponseMeta + 5, // 5: apiextensions.fn.proto.v1beta1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1beta1.State + 7, // 6: apiextensions.fn.proto.v1beta1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1beta1.Result + 11, // 7: apiextensions.fn.proto.v1beta1.ResponseMeta.ttl:type_name -> google.protobuf.Duration + 6, // 8: apiextensions.fn.proto.v1beta1.State.composite:type_name -> apiextensions.fn.proto.v1beta1.Resource + 8, // 9: apiextensions.fn.proto.v1beta1.State.resources:type_name -> apiextensions.fn.proto.v1beta1.State.ResourcesEntry + 10, // 10: apiextensions.fn.proto.v1beta1.Resource.resource:type_name -> google.protobuf.Struct + 9, // 11: apiextensions.fn.proto.v1beta1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + 0, // 12: apiextensions.fn.proto.v1beta1.Result.severity:type_name -> apiextensions.fn.proto.v1beta1.Severity + 6, // 13: apiextensions.fn.proto.v1beta1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resource + 1, // 14: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1beta1.RunFunctionRequest + 2, // 15: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1beta1.RunFunctionResponse + 15, // [15:16] is the sub-list for method output_type + 14, // [14:15] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_apiextensions_fn_proto_v1beta1_run_function_proto_init() } +func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { + if File_apiextensions_fn_proto_v1beta1_run_function_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunFunctionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunFunctionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} 
{ + switch v := v.(*RequestMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*State); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc, + NumEnums: 1, + NumMessages: 9, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes, + DependencyIndexes: file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs, + EnumInfos: file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes, + MessageInfos: file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes, + }.Build() + File_apiextensions_fn_proto_v1beta1_run_function_proto = out.File + file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = nil + file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes = nil + file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs = nil +} diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.proto b/apis/apiextensions/fn/proto/v1beta1/run_function.proto new file mode 100644 index 000000000..55588bf54 --- /dev/null +++ b/apis/apiextensions/fn/proto/v1beta1/run_function.proto @@ -0,0 +1,131 @@ +/* +Copyright 2022 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +syntax = "proto3"; + +import "google/protobuf/struct.proto"; +import "google/protobuf/duration.proto"; + +package apiextensions.fn.proto.v1beta1; + +option go_package = "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1"; + +// A FunctionRunnerService is a Composition Function. 
+service FunctionRunnerService { + // RunFunction runs the Composition Function. + rpc RunFunction(RunFunctionRequest) returns (RunFunctionResponse) {} +} + +// A RunFunctionRequest requests that the Composition Function be run. +message RunFunctionRequest { + // Metadata pertaining to this request. + RequestMeta meta = 1; + + // The observed state prior to invocation of a Function pipeline. State passed + // to each Function is fresh as of the time the pipeline was invoked, not as + // of the time each Function was invoked. + State observed = 2; + + // Desired state according to a Function pipeline. The state passed to a + // particular Function may have been accumulated by processing a Composition's + // patch-and-transform resources array. It may also have been accumulated by + // previous Functions in the pipeline. + State desired = 3; + + // Optional input specific to this Function invocation. A JSON representation + // of the 'input' block of the relevant entry in a Composition's pipeline. + optional google.protobuf.Struct input = 4; +} + +// A RunFunctionResponse contains the result of a Composition Function run. +message RunFunctionResponse { + // Metadata pertaining to this response. + ResponseMeta meta = 1; + + // Desired state according to a Function pipeline. Functions may add desired + // state, and may mutate or delete any part of the desired state they are + // concerned with. A Function must pass through any part of the desired state + // that it is not concerned with. + State desired = 2; + + // Results of the Function run. Results are used for observability purposes. + repeated Result results = 3; +} + +// RequestMeta contains metadata pertaining to a RunFunctionRequest. +message RequestMeta { + // An opaque string identifying the content of the request. Two identical + // requests should have the same tag. + string tag = 1; +} + +// ResponseMeta contains metadata pertaining to a RunFunctionResponse. +message ResponseMeta { + // An opaque string identifying the content of the request. Must match the + // meta.tag of the corresponding RunFunctionRequest. + string tag = 1; + + // Time-to-live of this response. Deterministic Functions with no side-effects + // (e.g. simple templating Functions) may specify a TTL. Crossplane may choose + // to cache responses until the TTL expires. + optional google.protobuf.Duration ttl = 2; +} + +// State of the composite resource (XR) and any composed resources. +message State { + // The state of the composite resource (XR). + Resource composite = 1; + + // The state of any composed resources. + map<string, Resource> resources = 2; +} + +// A Resource represents the state of a resource. +message Resource { + // The JSON representation of the resource. + google.protobuf.Struct resource = 1; + + // The resource's connection details. + map<string, bytes> connection_details = 2; +} + +// A Result of running a Function. +message Result { + // Severity of this result. + Severity severity = 1; + + // Human-readable details about the result. + string message = 2; +} + +// Severity of Function results. +enum Severity { + SEVERITY_UNSPECIFIED = 0; + + // Fatal results are fatal; subsequent Composition Functions may run, but + // the Composition Function pipeline run will be considered a failure and + // the first fatal result will be returned as an error. + SEVERITY_FATAL = 1; + + // Warning results are non-fatal; the entire Composition will run to + // completion but warning events and debug logs associated with the + // composite resource will be emitted.
+ SEVERITY_WARNING = 2; + + // Normal results are emitted as normal events and debug logs associated + // with the composite resource. + SEVERITY_NORMAL = 3; +} \ No newline at end of file diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function_grpc.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function_grpc.pb.go new file mode 100644 index 000000000..5ea2c19d6 --- /dev/null +++ b/apis/apiextensions/fn/proto/v1beta1/run_function_grpc.pb.go @@ -0,0 +1,126 @@ +// +//Copyright 2022 The Crossplane Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: apiextensions/fn/proto/v1beta1/run_function.proto + +package v1beta1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + FunctionRunnerService_RunFunction_FullMethodName = "/apiextensions.fn.proto.v1beta1.FunctionRunnerService/RunFunction" +) + +// FunctionRunnerServiceClient is the client API for FunctionRunnerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type FunctionRunnerServiceClient interface { + // RunFunction runs the Composition Function. + RunFunction(ctx context.Context, in *RunFunctionRequest, opts ...grpc.CallOption) (*RunFunctionResponse, error) +} + +type functionRunnerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewFunctionRunnerServiceClient(cc grpc.ClientConnInterface) FunctionRunnerServiceClient { + return &functionRunnerServiceClient{cc} +} + +func (c *functionRunnerServiceClient) RunFunction(ctx context.Context, in *RunFunctionRequest, opts ...grpc.CallOption) (*RunFunctionResponse, error) { + out := new(RunFunctionResponse) + err := c.cc.Invoke(ctx, FunctionRunnerService_RunFunction_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FunctionRunnerServiceServer is the server API for FunctionRunnerService service. +// All implementations must embed UnimplementedFunctionRunnerServiceServer +// for forward compatibility +type FunctionRunnerServiceServer interface { + // RunFunction runs the Composition Function. + RunFunction(context.Context, *RunFunctionRequest) (*RunFunctionResponse, error) + mustEmbedUnimplementedFunctionRunnerServiceServer() +} + +// UnimplementedFunctionRunnerServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedFunctionRunnerServiceServer struct { +} + +func (UnimplementedFunctionRunnerServiceServer) RunFunction(context.Context, *RunFunctionRequest) (*RunFunctionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RunFunction not implemented") +} +func (UnimplementedFunctionRunnerServiceServer) mustEmbedUnimplementedFunctionRunnerServiceServer() {} + +// UnsafeFunctionRunnerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to FunctionRunnerServiceServer will +// result in compilation errors. +type UnsafeFunctionRunnerServiceServer interface { + mustEmbedUnimplementedFunctionRunnerServiceServer() +} + +func RegisterFunctionRunnerServiceServer(s grpc.ServiceRegistrar, srv FunctionRunnerServiceServer) { + s.RegisterService(&FunctionRunnerService_ServiceDesc, srv) +} + +func _FunctionRunnerService_RunFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunFunctionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FunctionRunnerServiceServer).RunFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: FunctionRunnerService_RunFunction_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FunctionRunnerServiceServer).RunFunction(ctx, req.(*RunFunctionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// FunctionRunnerService_ServiceDesc is the grpc.ServiceDesc for FunctionRunnerService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var FunctionRunnerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "apiextensions.fn.proto.v1beta1.FunctionRunnerService", + HandlerType: (*FunctionRunnerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "RunFunction", + Handler: _FunctionRunnerService_RunFunction_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "apiextensions/fn/proto/v1beta1/run_function.proto", +} diff --git a/apis/apiextensions/v1/composition_environment.go b/apis/apiextensions/v1/composition_environment.go index 98f73f77a..ab63c773c 100644 --- a/apis/apiextensions/v1/composition_environment.go +++ b/apis/apiextensions/v1/composition_environment.go @@ -17,8 +17,11 @@ limitations under the License. package v1 import ( + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/validation/field" + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane/internal/validation/errors" ) @@ -42,6 +45,11 @@ type EnvironmentConfiguration struct { // Patches is a list of environment patches that are executed before a // composition's resources are composed. Patches []EnvironmentPatch `json:"patches,omitempty"` + + // Policy represents the Resolve and Resolution policies which apply to + // all EnvironmentSourceReferences in EnvironmentConfigs list. + // +optional + Policy *xpv1.Policy `json:"policy,omitempty"` } // Validate the EnvironmentConfiguration. @@ -63,6 +71,28 @@ func (e *EnvironmentConfiguration) Validate() field.ErrorList { return errs } +// ShouldResolve specifies whether EnvironmentConfiguration should be resolved or not. 
+func (e *EnvironmentConfiguration) ShouldResolve(currentRefs []corev1.ObjectReference) bool { + + if e == nil || len(e.EnvironmentConfigs) == 0 { + return false + } + + if len(currentRefs) == 0 { + return true + } + + return e.Policy.IsResolvePolicyAlways() +} + +// IsRequired specifies whether EnvironmentConfiguration is required or not. +func (e *EnvironmentConfiguration) IsRequired() bool { + if e == nil { + return false + } + return !e.Policy.IsResolutionPolicyOptional() +} + // EnvironmentSourceType specifies the way the EnvironmentConfig is selected. type EnvironmentSourceType string @@ -87,7 +117,7 @@ type EnvironmentSource struct { // +optional Ref *EnvironmentSourceReference `json:"ref,omitempty"` - // Selector selects one EnvironmentConfig via labels. + // Selector selects EnvironmentConfig(s) via labels. // +optional Selector *EnvironmentSourceSelector `json:"selector,omitempty"` } @@ -110,10 +140,9 @@ func (e *EnvironmentSource) Validate() *field.Error { if len(e.Selector.MatchLabels) == 0 { return field.Required(field.NewPath("selector", "matchLabels"), "selector must have at least one match label") } - for i, m := range e.Selector.MatchLabels { - if err := m.Validate(); err != nil { - return errors.WrapFieldError(err, field.NewPath("selector", "matchLabels").Index(i)) - } + + if err := e.Selector.Validate(); err != nil { + return errors.WrapFieldError(err, field.NewPath("selector")) } default: return field.Invalid(field.NewPath("type"), e.Type, "invalid type") @@ -135,12 +164,53 @@ func (e *EnvironmentSourceReference) Validate() *field.Error { return nil } +// EnvironmentSourceSelectorModeType specifies amount of retrieved EnvironmentConfigs +// with matching label. +type EnvironmentSourceSelectorModeType string + +const ( + // EnvironmentSourceSelectorSingleMode extracts only first EnvironmentConfig from the sorted list. + EnvironmentSourceSelectorSingleMode EnvironmentSourceSelectorModeType = "Single" + + // EnvironmentSourceSelectorMultiMode extracts multiple EnvironmentConfigs from the sorted list. + EnvironmentSourceSelectorMultiMode EnvironmentSourceSelectorModeType = "Multiple" +) + // An EnvironmentSourceSelector selects an EnvironmentConfig via labels. type EnvironmentSourceSelector struct { + + // Mode specifies retrieval strategy: "Single" or "Multiple". + // +kubebuilder:validation:Enum=Single;Multiple + // +kubebuilder:default=Single + Mode EnvironmentSourceSelectorModeType `json:"mode,omitempty"` + + // MaxMatch specifies the number of extracted EnvironmentConfigs in Multiple mode, extracts all if nil. + MaxMatch *uint64 `json:"maxMatch,omitempty"` + + // SortByFieldPath is the path to the field based on which list of EnvironmentConfigs is alphabetically sorted. + // +kubebuilder:default="metadata.name" + SortByFieldPath string `json:"sortByFieldPath,omitempty"` + // MatchLabels ensures an object with matching labels is selected. MatchLabels []EnvironmentSourceSelectorLabelMatcher `json:"matchLabels,omitempty"` } +// Validate logically validates the EnvironmentSourceSelector. 
+func (e *EnvironmentSourceSelector) Validate() *field.Error { + + if e.Mode == EnvironmentSourceSelectorSingleMode && e.MaxMatch != nil { + return field.Forbidden(field.NewPath("maxMatch"), "maxMatch is not supported in Single mode") + } + + for i, m := range e.MatchLabels { + if err := m.Validate(); err != nil { + return errors.WrapFieldError(err, field.NewPath("matchLabels").Index(i)) + } + } + + return nil +} + // EnvironmentSourceSelectorLabelMatcherType specifies where the value for a // label comes from. type EnvironmentSourceSelectorLabelMatcherType string @@ -161,7 +231,7 @@ type EnvironmentSourceSelectorLabelMatcher struct { // +optional // +kubebuilder:validation:Enum=FromCompositeFieldPath;Value // +kubebuilder:default=FromCompositeFieldPath - Type EnvironmentSourceSelectorLabelMatcherType `json:"type"` + Type EnvironmentSourceSelectorLabelMatcherType `json:"type,omitempty"` // Key of the label to match. Key string `json:"key"` diff --git a/apis/apiextensions/v1/composition_environment_test.go b/apis/apiextensions/v1/composition_environment_test.go index 240c0fcc2..6363d0690 100644 --- a/apis/apiextensions/v1/composition_environment_test.go +++ b/apis/apiextensions/v1/composition_environment_test.go @@ -5,8 +5,11 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/utils/pointer" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" ) func TestEnvironmentPatchValidate(t *testing.T) { @@ -75,3 +78,85 @@ func TestEnvironmentPatchValidate(t *testing.T) { }) } } + +func TestEnvironmentShouldResolve(t *testing.T) { + withResolvePolicy := func() *v1.ResolvePolicy { + p := v1.ResolvePolicyAlways + return &p + } + + type args struct { + refs []corev1.ObjectReference + ec *EnvironmentConfiguration + } + + cases := map[string]struct { + reason string + args args + want bool + }{ + "DontResolveWhenHaveRefs": { + reason: "Should not resolve when composite has refs", + args: args{ + ec: &EnvironmentConfiguration{ + EnvironmentConfigs: []EnvironmentSource{{}}, + }, + refs: []corev1.ObjectReference{{}}, + }, + want: false, + }, + "ResolveWhenNoRefs": { + reason: "Should resolve when no refs are held", + args: args{ + ec: &EnvironmentConfiguration{ + EnvironmentConfigs: []EnvironmentSource{{}}, + }, + refs: []corev1.ObjectReference{}, + }, + want: true, + }, + "ResolveWhenPolicyAlways": { + reason: "Should resolve when resolve policy is Always", + args: args{ + ec: &EnvironmentConfiguration{ + EnvironmentConfigs: []EnvironmentSource{ + {}, + }, + Policy: &v1.Policy{ + Resolve: withResolvePolicy(), + }, + }, + refs: []corev1.ObjectReference{ + {}, + {}, + }, + }, + want: true, + }, + "DontResolveWhenPolicyNotAlways": { + reason: "Should not resolve when resolve policy is not Always", + args: args{ + ec: &EnvironmentConfiguration{ + EnvironmentConfigs: []EnvironmentSource{ + {}, + }, + }, + refs: []corev1.ObjectReference{ + {}, + {}, + }, + }, + want: false, + }, + } + + for name, tc := range cases { + + t.Run(name, func(t *testing.T) { + got := tc.args.ec.ShouldResolve(tc.args.refs) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("%s\nShouldResolve(...): -want, +got:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/apis/apiextensions/v1/xrd_types.go b/apis/apiextensions/v1/xrd_types.go index 914ee47eb..c2ec81ceb 100644 --- a/apis/apiextensions/v1/xrd_types.go +++ b/apis/apiextensions/v1/xrd_types.go @@ -95,6 +95,10 @@ type CompositeResourceDefinitionSpec 
struct { // Conversion defines all conversion settings for the defined Composite resource. // +optional Conversion *extv1.CustomResourceConversion `json:"conversion,omitempty"` + + // Metadata specifies the desired metadata for the defined composite resource and claim CRD's. + // +optional + Metadata *CompositeResourceDefinitionSpecMetadata `json:"metadata,omitempty"` } // A CompositionReference references a Composition. @@ -103,6 +107,25 @@ type CompositionReference struct { Name string `json:"name"` } +// CompositeResourceDefinitionSpecMetadata specifies the desired metadata of the defined composite resource and claim CRD's. +type CompositeResourceDefinitionSpecMetadata struct { + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // and services. + // These labels are added to the composite resource and claim CRD's in addition + // to any labels defined by `CompositionResourceDefinition` `metadata.labels`. + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty"` +} + // CompositeResourceDefinitionVersion describes a version of an XR. type CompositeResourceDefinitionVersion struct { // Name of this version, e.g. “v1”, “v2beta1”, etc. Composite resources are diff --git a/apis/apiextensions/v1/zz_generated.conversion.go b/apis/apiextensions/v1/zz_generated.conversion.go index 000c5cb20..d529bdf63 100755 --- a/apis/apiextensions/v1/zz_generated.conversion.go +++ b/apis/apiextensions/v1/zz_generated.conversion.go @@ -231,6 +231,7 @@ func (c *GeneratedRevisionSpecConverter) pV1EnvironmentConfigurationToPV1Environ } } v1EnvironmentConfiguration.Patches = v1EnvironmentPatchList + v1EnvironmentConfiguration.Policy = c.pV1PolicyToPV1Policy((*source).Policy) pV1EnvironmentConfiguration = &v1EnvironmentConfiguration } return pV1EnvironmentConfiguration @@ -248,6 +249,14 @@ func (c *GeneratedRevisionSpecConverter) pV1EnvironmentSourceSelectorToPV1Enviro var pV1EnvironmentSourceSelector *EnvironmentSourceSelector if source != nil { var v1EnvironmentSourceSelector EnvironmentSourceSelector + v1EnvironmentSourceSelector.Mode = EnvironmentSourceSelectorModeType((*source).Mode) + var pUint64 *uint64 + if (*source).MaxMatch != nil { + xuint64 := *(*source).MaxMatch + pUint64 = &xuint64 + } + v1EnvironmentSourceSelector.MaxMatch = pUint64 + v1EnvironmentSourceSelector.SortByFieldPath = (*source).SortByFieldPath var v1EnvironmentSourceSelectorLabelMatcherList []EnvironmentSourceSelectorLabelMatcher if (*source).MatchLabels != nil { v1EnvironmentSourceSelectorLabelMatcherList = make([]EnvironmentSourceSelectorLabelMatcher, len((*source).MatchLabels)) @@ -363,6 +372,26 @@ func (c *GeneratedRevisionSpecConverter) pV1PatchPolicyToPV1PatchPolicy(source * } return pV1PatchPolicy } +func (c *GeneratedRevisionSpecConverter) pV1PolicyToPV1Policy(source *v13.Policy) *v13.Policy { + var pV1Policy *v13.Policy + if source != nil { + var v1Policy v13.Policy + var pV1ResolvePolicy *v13.ResolvePolicy + if (*source).Resolve 
!= nil { + v1ResolvePolicy := v13.ResolvePolicy(*(*source).Resolve) + pV1ResolvePolicy = &v1ResolvePolicy + } + v1Policy.Resolve = pV1ResolvePolicy + var pV1ResolutionPolicy *v13.ResolutionPolicy + if (*source).Resolution != nil { + v1ResolutionPolicy := v13.ResolutionPolicy(*(*source).Resolution) + pV1ResolutionPolicy = &v1ResolutionPolicy + } + v1Policy.Resolution = pV1ResolutionPolicy + pV1Policy = &v1Policy + } + return pV1Policy +} func (c *GeneratedRevisionSpecConverter) pV1StoreConfigReferenceToPV1StoreConfigReference(source *StoreConfigReference) *StoreConfigReference { var pV1StoreConfigReference *StoreConfigReference if source != nil { diff --git a/apis/apiextensions/v1/zz_generated.deepcopy.go b/apis/apiextensions/v1/zz_generated.deepcopy.go index ab410c04f..3237fe53a 100644 --- a/apis/apiextensions/v1/zz_generated.deepcopy.go +++ b/apis/apiextensions/v1/zz_generated.deepcopy.go @@ -233,6 +233,11 @@ func (in *CompositeResourceDefinitionSpec) DeepCopyInto(out *CompositeResourceDe *out = new(apiextensionsv1.CustomResourceConversion) (*in).DeepCopyInto(*out) } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(CompositeResourceDefinitionSpecMetadata) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeResourceDefinitionSpec. @@ -245,6 +250,35 @@ func (in *CompositeResourceDefinitionSpec) DeepCopy() *CompositeResourceDefiniti return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositeResourceDefinitionSpecMetadata) DeepCopyInto(out *CompositeResourceDefinitionSpecMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeResourceDefinitionSpecMetadata. +func (in *CompositeResourceDefinitionSpecMetadata) DeepCopy() *CompositeResourceDefinitionSpecMetadata { + if in == nil { + return nil + } + out := new(CompositeResourceDefinitionSpecMetadata) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CompositeResourceDefinitionStatus) DeepCopyInto(out *CompositeResourceDefinitionStatus) { *out = *in @@ -772,6 +806,11 @@ func (in *EnvironmentConfiguration) DeepCopyInto(out *EnvironmentConfiguration) (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(commonv1.Policy) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentConfiguration. @@ -869,6 +908,11 @@ func (in *EnvironmentSourceReference) DeepCopy() *EnvironmentSourceReference { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EnvironmentSourceSelector) DeepCopyInto(out *EnvironmentSourceSelector) { *out = *in + if in.MaxMatch != nil { + in, out := &in.MaxMatch, &out.MaxMatch + *out = new(uint64) + **out = **in + } if in.MatchLabels != nil { in, out := &in.MatchLabels, &out.MatchLabels *out = make([]EnvironmentSourceSelectorLabelMatcher, len(*in)) diff --git a/apis/apiextensions/v1alpha1/register.go b/apis/apiextensions/v1alpha1/register.go index 881865396..2e2e04e63 100644 --- a/apis/apiextensions/v1alpha1/register.go +++ b/apis/apiextensions/v1alpha1/register.go @@ -40,7 +40,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Composition type metadata. +// EnvironmentConfig type metadata. var ( EnvironmentConfigKind = reflect.TypeOf(EnvironmentConfig{}).Name() EnvironmentConfigGroupKind = schema.GroupKind{Group: Group, Kind: EnvironmentConfigKind}.String() diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_environment.go b/apis/apiextensions/v1beta1/zz_generated.composition_environment.go index 3fe523a83..fb3a2d7f4 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_environment.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_environment.go @@ -19,8 +19,11 @@ limitations under the License. package v1beta1 import ( + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/validation/field" + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane/internal/validation/errors" ) @@ -44,6 +47,11 @@ type EnvironmentConfiguration struct { // Patches is a list of environment patches that are executed before a // composition's resources are composed. Patches []EnvironmentPatch `json:"patches,omitempty"` + + // Policy represents the Resolve and Resolution policies which apply to + // all EnvironmentSourceReferences in EnvironmentConfigs list. + // +optional + Policy *xpv1.Policy `json:"policy,omitempty"` } // Validate the EnvironmentConfiguration. @@ -65,6 +73,28 @@ func (e *EnvironmentConfiguration) Validate() field.ErrorList { return errs } +// ShouldResolve specifies whether EnvironmentConfiguration should be resolved or not. +func (e *EnvironmentConfiguration) ShouldResolve(currentRefs []corev1.ObjectReference) bool { + + if e == nil || len(e.EnvironmentConfigs) == 0 { + return false + } + + if len(currentRefs) == 0 { + return true + } + + return e.Policy.IsResolvePolicyAlways() +} + +// IsRequired specifies whether EnvironmentConfiguration is required or not. +func (e *EnvironmentConfiguration) IsRequired() bool { + if e == nil { + return false + } + return !e.Policy.IsResolutionPolicyOptional() +} + // EnvironmentSourceType specifies the way the EnvironmentConfig is selected. type EnvironmentSourceType string @@ -89,7 +119,7 @@ type EnvironmentSource struct { // +optional Ref *EnvironmentSourceReference `json:"ref,omitempty"` - // Selector selects one EnvironmentConfig via labels. + // Selector selects EnvironmentConfig(s) via labels. 
// +optional Selector *EnvironmentSourceSelector `json:"selector,omitempty"` } @@ -112,10 +142,9 @@ func (e *EnvironmentSource) Validate() *field.Error { if len(e.Selector.MatchLabels) == 0 { return field.Required(field.NewPath("selector", "matchLabels"), "selector must have at least one match label") } - for i, m := range e.Selector.MatchLabels { - if err := m.Validate(); err != nil { - return errors.WrapFieldError(err, field.NewPath("selector", "matchLabels").Index(i)) - } + + if err := e.Selector.Validate(); err != nil { + return errors.WrapFieldError(err, field.NewPath("selector")) } default: return field.Invalid(field.NewPath("type"), e.Type, "invalid type") @@ -137,12 +166,53 @@ func (e *EnvironmentSourceReference) Validate() *field.Error { return nil } +// EnvironmentSourceSelectorModeType specifies amount of retrieved EnvironmentConfigs +// with matching label. +type EnvironmentSourceSelectorModeType string + +const ( + // EnvironmentSourceSelectorSingleMode extracts only first EnvironmentConfig from the sorted list. + EnvironmentSourceSelectorSingleMode EnvironmentSourceSelectorModeType = "Single" + + // EnvironmentSourceSelectorMultiMode extracts multiple EnvironmentConfigs from the sorted list. + EnvironmentSourceSelectorMultiMode EnvironmentSourceSelectorModeType = "Multiple" +) + // An EnvironmentSourceSelector selects an EnvironmentConfig via labels. type EnvironmentSourceSelector struct { + + // Mode specifies retrieval strategy: "Single" or "Multiple". + // +kubebuilder:validation:Enum=Single;Multiple + // +kubebuilder:default=Single + Mode EnvironmentSourceSelectorModeType `json:"mode,omitempty"` + + // MaxMatch specifies the number of extracted EnvironmentConfigs in Multiple mode, extracts all if nil. + MaxMatch *uint64 `json:"maxMatch,omitempty"` + + // SortByFieldPath is the path to the field based on which list of EnvironmentConfigs is alphabetically sorted. + // +kubebuilder:default="metadata.name" + SortByFieldPath string `json:"sortByFieldPath,omitempty"` + // MatchLabels ensures an object with matching labels is selected. MatchLabels []EnvironmentSourceSelectorLabelMatcher `json:"matchLabels,omitempty"` } +// Validate logically validates the EnvironmentSourceSelector. +func (e *EnvironmentSourceSelector) Validate() *field.Error { + + if e.Mode == EnvironmentSourceSelectorSingleMode && e.MaxMatch != nil { + return field.Forbidden(field.NewPath("maxMatch"), "maxMatch is not supported in Single mode") + } + + for i, m := range e.MatchLabels { + if err := m.Validate(); err != nil { + return errors.WrapFieldError(err, field.NewPath("matchLabels").Index(i)) + } + } + + return nil +} + // EnvironmentSourceSelectorLabelMatcherType specifies where the value for a // label comes from. type EnvironmentSourceSelectorLabelMatcherType string @@ -163,7 +233,7 @@ type EnvironmentSourceSelectorLabelMatcher struct { // +optional // +kubebuilder:validation:Enum=FromCompositeFieldPath;Value // +kubebuilder:default=FromCompositeFieldPath - Type EnvironmentSourceSelectorLabelMatcherType `json:"type"` + Type EnvironmentSourceSelectorLabelMatcherType `json:"type,omitempty"` // Key of the label to match. 
Key string `json:"key"` diff --git a/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index a10c44e86..5ff1a18e0 100644 --- a/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -445,6 +445,11 @@ func (in *EnvironmentConfiguration) DeepCopyInto(out *EnvironmentConfiguration) (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(commonv1.Policy) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentConfiguration. @@ -542,6 +547,11 @@ func (in *EnvironmentSourceReference) DeepCopy() *EnvironmentSourceReference { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EnvironmentSourceSelector) DeepCopyInto(out *EnvironmentSourceSelector) { *out = *in + if in.MaxMatch != nil { + in, out := &in.MaxMatch, &out.MaxMatch + *out = new(uint64) + **out = **in + } if in.MatchLabels != nil { in, out := &in.MatchLabels, &out.MatchLabels *out = make([]EnvironmentSourceSelectorLabelMatcher, len(*in)) diff --git a/apis/pkg/meta/v1alpha1/function_types.go b/apis/pkg/meta/v1alpha1/function_types.go new file mode 100644 index 000000000..b87d6c01c --- /dev/null +++ b/apis/pkg/meta/v1alpha1/function_types.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// FunctionSpec specifies the configuration of a Function. +type FunctionSpec struct { + MetaSpec `json:",inline"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:storageversion + +// A Function is the description of a Crossplane Function package. +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FunctionSpec `json:"spec"` +} diff --git a/apis/pkg/meta/v1alpha1/register.go b/apis/pkg/meta/v1alpha1/register.go index 41f37e2c0..0447e11f2 100644 --- a/apis/pkg/meta/v1alpha1/register.go +++ b/apis/pkg/meta/v1alpha1/register.go @@ -56,7 +56,16 @@ var ( ConfigurationGroupVersionKind = SchemeGroupVersion.WithKind(ConfigurationKind) ) +// Function type metadata. +var ( + FunctionKind = reflect.TypeOf(Function{}).Name() + FunctionGroupKind = schema.GroupKind{Group: Group, Kind: FunctionKind}.String() + FunctionKindAPIVersion = FunctionKind + "." 
+ SchemeGroupVersion.String() + FunctionGroupVersionKind = SchemeGroupVersion.WithKind(FunctionKind) +) + func init() { SchemeBuilder.Register(&Configuration{}) SchemeBuilder.Register(&Provider{}) + SchemeBuilder.Register(&Function{}) } diff --git a/apis/pkg/meta/v1alpha1/zz_generated.deepcopy.go b/apis/pkg/meta/v1alpha1/zz_generated.deepcopy.go index 69b78ee36..5f449bc7b 100644 --- a/apis/pkg/meta/v1alpha1/zz_generated.deepcopy.go +++ b/apis/pkg/meta/v1alpha1/zz_generated.deepcopy.go @@ -135,6 +135,48 @@ func (in *Dependency) DeepCopy() *Dependency { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Function) DeepCopyInto(out *Function) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. +func (in *Function) DeepCopy() *Function { + if in == nil { + return nil + } + out := new(Function) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Function) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { + *out = *in + in.MetaSpec.DeepCopyInto(&out.MetaSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. +func (in *FunctionSpec) DeepCopy() *FunctionSpec { + if in == nil { + return nil + } + out := new(FunctionSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GeneratedFromHubConverter) DeepCopyInto(out *GeneratedFromHubConverter) { *out = *in diff --git a/apis/pkg/v1alpha1/function_types.go b/apis/pkg/v1alpha1/function_types.go new file mode 100644 index 000000000..bbe58f64e --- /dev/null +++ b/apis/pkg/v1alpha1/function_types.go @@ -0,0 +1,103 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + + v1 "github.com/crossplane/crossplane/apis/pkg/v1" +) + +// +kubebuilder:object:root=true +// +genclient +// +genclient:nonNamespaced + +// Function is the CRD type for a request to deploy a long-running Function. 
+// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="INSTALLED",type="string",JSONPath=".status.conditions[?(@.type=='Installed')].status" +// +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" +// +kubebuilder:printcolumn:name="PACKAGE",type="string",JSONPath=".spec.package" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,pkg} +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FunctionSpec `json:"spec"` + Status FunctionStatus `json:"status"` +} + +// FunctionSpec specifies the configuration of a Function. +type FunctionSpec struct { + v1.PackageSpec `json:",inline"` +} + +// FunctionStatus represents the observed state of a Function. +type FunctionStatus struct { + xpv1.ConditionedStatus `json:",inline"` + v1.PackageStatus `json:",inline"` + + // Endpoint is the gRPC endpoint where Crossplane will send RunFunctionRequests. + Endpoint string `json:"endpoint,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionList contains a list of Function. +type FunctionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Function `json:"items"` +} + +// +kubebuilder:object:root=true +// +genclient +// +genclient:nonNamespaced + +// A FunctionRevision that has been added to Crossplane. +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" +// +kubebuilder:printcolumn:name="REVISION",type="string",JSONPath=".spec.revision" +// +kubebuilder:printcolumn:name="IMAGE",type="string",JSONPath=".spec.image" +// +kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".spec.desiredState" +// +kubebuilder:printcolumn:name="DEP-FOUND",type="string",JSONPath=".status.foundDependencies" +// +kubebuilder:printcolumn:name="DEP-INSTALLED",type="string",JSONPath=".status.installedDependencies" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,pkgrev} +type FunctionRevision struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec v1.PackageRevisionSpec `json:"spec,omitempty"` + Status v1.PackageRevisionStatus `json:"status,omitempty"` + + // Endpoint is the gRPC endpoint where Crossplane will send RunFunctionRequests. + Endpoint string `json:"endpoint,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionRevisionList contains a list of FunctionRevision. +type FunctionRevisionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FunctionRevision `json:"items"` +} diff --git a/apis/pkg/v1alpha1/register.go b/apis/pkg/v1alpha1/register.go index 63509f215..ff36fccf3 100644 --- a/apis/pkg/v1alpha1/register.go +++ b/apis/pkg/v1alpha1/register.go @@ -48,6 +48,14 @@ var ( ControllerConfigGroupVersionKind = SchemeGroupVersion.WithKind(ControllerConfigKind) ) +// Function type metadata. +var ( + FunctionRevisionKind = reflect.TypeOf(FunctionRevision{}).Name() + FunctionRevisionGroupKind = schema.GroupKind{Group: Group, Kind: FunctionRevisionKind}.String() + FunctionRevisionKindAPIVersion = FunctionRevisionKind + "." 
+ SchemeGroupVersion.String() + FunctionRevisionGroupVersionKind = SchemeGroupVersion.WithKind(FunctionRevisionKind) +) + func init() { SchemeBuilder.Register(&ControllerConfig{}, &ControllerConfigList{}) } diff --git a/apis/pkg/v1alpha1/zz_generated.deepcopy.go b/apis/pkg/v1alpha1/zz_generated.deepcopy.go index 9e79d2c3c..b10b22aef 100644 --- a/apis/pkg/v1alpha1/zz_generated.deepcopy.go +++ b/apis/pkg/v1alpha1/zz_generated.deepcopy.go @@ -216,6 +216,157 @@ func (in *ControllerConfigSpec) DeepCopy() *ControllerConfigSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Function) DeepCopyInto(out *Function) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. +func (in *Function) DeepCopy() *Function { + if in == nil { + return nil + } + out := new(Function) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Function) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionList) DeepCopyInto(out *FunctionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Function, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionList. +func (in *FunctionList) DeepCopy() *FunctionList { + if in == nil { + return nil + } + out := new(FunctionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionRevision) DeepCopyInto(out *FunctionRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionRevision. +func (in *FunctionRevision) DeepCopy() *FunctionRevision { + if in == nil { + return nil + } + out := new(FunctionRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionRevisionList) DeepCopyInto(out *FunctionRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FunctionRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionRevisionList. +func (in *FunctionRevisionList) DeepCopy() *FunctionRevisionList { + if in == nil { + return nil + } + out := new(FunctionRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { + *out = *in + in.PackageSpec.DeepCopyInto(&out.PackageSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. +func (in *FunctionSpec) DeepCopy() *FunctionSpec { + if in == nil { + return nil + } + out := new(FunctionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionStatus) DeepCopyInto(out *FunctionStatus) { + *out = *in + in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) + out.PackageStatus = in.PackageStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionStatus. +func (in *FunctionStatus) DeepCopy() *FunctionStatus { + if in == nil { + return nil + } + out := new(FunctionStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodObjectMeta) DeepCopyInto(out *PodObjectMeta) { *out = *in diff --git a/cluster/charts/crossplane/templates/clusterrole.yaml b/cluster/charts/crossplane/templates/clusterrole.yaml index 5f0fc8af4..5559ae3ba 100644 --- a/cluster/charts/crossplane/templates/clusterrole.yaml +++ b/cluster/charts/crossplane/templates/clusterrole.yaml @@ -35,6 +35,7 @@ rules: - apiextensions.k8s.io resources: - customresourcedefinitions + - customresourcedefinitions/status verbs: - "*" - apiGroups: diff --git a/cluster/charts/crossplane/templates/deployment.yaml b/cluster/charts/crossplane/templates/deployment.yaml index 94fa3c75a..eaa87261d 100644 --- a/cluster/charts/crossplane/templates/deployment.yaml +++ b/cluster/charts/crossplane/templates/deployment.yaml @@ -215,6 +215,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + {{- if .Values.registryCaBundleConfig.key }} + - name: CA_BUNDLE_PATH + value: "/certs/{{ .Values.registryCaBundleConfig.key }}" + {{- end}} {{- range $key, $value := .Values.xfn.extraEnvVars }} - name: {{ $key | replace "." 
"_" }} value: {{ $value | quote }} @@ -222,6 +226,10 @@ spec: volumeMounts: - mountPath: /xfn name: xfn-cache + {{- if .Values.registryCaBundleConfig.name }} + - mountPath: /certs + name: ca-certs + {{- end }} {{- end }} volumes: - name: package-cache diff --git a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml index bf44003c9..99e5e0e0c 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: compositeresourcedefinitions.apiextensions.crossplane.io spec: group: apiextensions.crossplane.io @@ -248,6 +248,29 @@ spec: resource. Composite resources are served under `/apis//...`. Must match the name of the XRD (in the form `.`). type: string + metadata: + description: Metadata specifies the desired metadata for the defined + composite resource and claim CRD's. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + and services. These labels are added to the composite resource + and claim CRD''s in addition to any labels defined by `CompositionResourceDefinition` + `metadata.labels`.' + type: object + type: object names: description: Names specifies the resource and kind names of the defined composite resource. diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index 10f024202..d9947e7f2 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: compositionrevisions.apiextensions.crossplane.io spec: group: apiextensions.crossplane.io @@ -95,8 +95,7 @@ spec: - name type: object selector: - description: Selector selects one EnvironmentConfig via - labels. + description: Selector selects EnvironmentConfig(s) via labels. properties: matchLabels: description: MatchLabels ensures an object with matching @@ -128,6 +127,26 @@ spec: - key type: object type: array + maxMatch: + description: MaxMatch specifies the number of extracted + EnvironmentConfigs in Multiple mode, extracts all + if nil. + format: int64 + type: integer + mode: + default: Single + description: 'Mode specifies retrieval strategy: "Single" + or "Multiple".' 
+ enum: + - Single + - Multiple + type: string + sortByFieldPath: + default: metadata.name + description: SortByFieldPath is the path to the field + based on which list of EnvironmentConfigs is alphabetically + sorted. + type: string type: object type: default: Reference @@ -447,6 +466,33 @@ spec: type: string type: object type: array + policy: + description: Policy represents the Resolve and Resolution policies + which apply to all EnvironmentSourceReferences in EnvironmentConfigs + list. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object type: object functions: description: Functions is list of Composition Functions that will @@ -1527,8 +1573,7 @@ spec: - name type: object selector: - description: Selector selects one EnvironmentConfig via - labels. + description: Selector selects EnvironmentConfig(s) via labels. properties: matchLabels: description: MatchLabels ensures an object with matching @@ -1560,6 +1605,26 @@ spec: - key type: object type: array + maxMatch: + description: MaxMatch specifies the number of extracted + EnvironmentConfigs in Multiple mode, extracts all + if nil. + format: int64 + type: integer + mode: + default: Single + description: 'Mode specifies retrieval strategy: "Single" + or "Multiple".' + enum: + - Single + - Multiple + type: string + sortByFieldPath: + default: metadata.name + description: SortByFieldPath is the path to the field + based on which list of EnvironmentConfigs is alphabetically + sorted. + type: string type: object type: default: Reference @@ -1879,6 +1944,33 @@ spec: type: string type: object type: array + policy: + description: Policy represents the Resolve and Resolution policies + which apply to all EnvironmentSourceReferences in EnvironmentConfigs + list. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object type: object functions: description: Functions is list of Composition Functions that will @@ -1929,6 +2021,7 @@ spec: uid?' type: string type: object + x-kubernetes-map-type: atomic type: array network: description: Network configuration for the Composition Function. 
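For orientation: both the CompositionRevision CRD above and the Composition CRD that follows gain the same environment selector and policy fields. Below is a minimal, hypothetical Composition fragment exercising them, written against the schema shown in these diffs; the names (`example`, the `stage` label key, the `dev` value) are illustrative placeholders and not part of this patch, and the `resources` array is left empty for brevity.

apiVersion: apiextensions.crossplane.io/v1
kind: Composition
metadata:
  name: example                  # placeholder name
spec:
  compositeTypeRef:
    apiVersion: example.org/v1   # placeholder composite resource type
    kind: XExample
  environment:
    # Re-resolve the selected EnvironmentConfigs on every reconcile and
    # tolerate the case where none can be resolved.
    policy:
      resolve: Always
      resolution: Optional
    environmentConfigs:
      - type: Selector
        selector:
          # "Multiple" mode returns every matching EnvironmentConfig,
          # sorted by sortByFieldPath and capped at maxMatch entries;
          # maxMatch is rejected in the default "Single" mode.
          mode: Multiple
          maxMatch: 3
          sortByFieldPath: metadata.name
          matchLabels:
            - type: Value
              key: stage         # placeholder label key
              value: dev
  resources: []                  # omitted for brevity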
diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index a0f0c8d5a..aaf2bcd3f 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: compositions.apiextensions.crossplane.io spec: group: apiextensions.crossplane.io @@ -92,8 +92,7 @@ spec: - name type: object selector: - description: Selector selects one EnvironmentConfig via - labels. + description: Selector selects EnvironmentConfig(s) via labels. properties: matchLabels: description: MatchLabels ensures an object with matching @@ -125,6 +124,26 @@ spec: - key type: object type: array + maxMatch: + description: MaxMatch specifies the number of extracted + EnvironmentConfigs in Multiple mode, extracts all + if nil. + format: int64 + type: integer + mode: + default: Single + description: 'Mode specifies retrieval strategy: "Single" + or "Multiple".' + enum: + - Single + - Multiple + type: string + sortByFieldPath: + default: metadata.name + description: SortByFieldPath is the path to the field + based on which list of EnvironmentConfigs is alphabetically + sorted. + type: string type: object type: default: Reference @@ -444,6 +463,33 @@ spec: type: string type: object type: array + policy: + description: Policy represents the Resolve and Resolution policies + which apply to all EnvironmentSourceReferences in EnvironmentConfigs + list. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object type: object functions: description: "Functions is list of Composition Functions that will diff --git a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml index 2b8e17fed..3884d62c2 100644 --- a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml +++ b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: environmentconfigs.apiextensions.crossplane.io spec: group: apiextensions.crossplane.io diff --git a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml index 41b5dfe7c..1291804fd 100644 --- a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: configurationrevisions.pkg.crossplane.io spec: group: pkg.crossplane.io diff --git a/cluster/crds/pkg.crossplane.io_configurations.yaml b/cluster/crds/pkg.crossplane.io_configurations.yaml index 1b98b1c87..6817f8956 100644 --- a/cluster/crds/pkg.crossplane.io_configurations.yaml +++ b/cluster/crds/pkg.crossplane.io_configurations.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: configurations.pkg.crossplane.io spec: group: pkg.crossplane.io diff --git a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml index 144446e17..6810e70ee 100644 --- a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: controllerconfigs.pkg.crossplane.io spec: group: pkg.crossplane.io @@ -148,6 +148,7 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. @@ -248,10 +249,12 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic type: array required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. co-locate @@ -328,6 +331,7 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied @@ -384,6 +388,7 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. The @@ -482,6 +487,7 @@ spec: requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied to the @@ -533,6 +539,7 @@ spec: requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. The term is applied @@ -633,6 +640,7 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied @@ -689,6 +697,7 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. The @@ -787,6 +796,7 @@ spec: requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied to the @@ -838,6 +848,7 @@ spec: requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. The term is applied @@ -916,6 +927,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -933,6 +945,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, @@ -957,6 +970,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace properties: @@ -975,6 +989,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1001,6 +1016,7 @@ spec: description: Specify whether the ConfigMap must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -1016,6 +1032,7 @@ spec: description: Specify whether the Secret must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -1047,6 +1064,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic type: array metadata: description: Metadata that will be added to the provider Pod. @@ -1326,6 +1344,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -1744,6 +1765,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' @@ -1775,6 +1797,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeID: description: 'volumeID used to identify the volume in cinder. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' @@ -1846,6 +1869,7 @@ spec: keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta @@ -1876,6 +1900,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic readOnly: description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). @@ -1929,6 +1954,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 @@ -1972,6 +1998,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -2088,10 +2115,10 @@ spec: referenced type: string required: - - apiGroup - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume with data, if @@ -2153,7 +2180,6 @@ spec: to be enabled. type: string required: - - apiGroup - kind - name type: object @@ -2187,6 +2213,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -2263,6 +2292,7 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: description: 'storageClassName is the name of the StorageClass required by the claim. More info: @@ -2353,6 +2383,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -2530,6 +2561,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic targetPortal: description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than @@ -2700,6 +2732,7 @@ spec: or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -2729,6 +2762,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be @@ -2777,6 +2811,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -2842,6 +2877,7 @@ spec: Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project @@ -2876,8 +2912,6 @@ spec: type: object type: object type: array - required: - - sources type: object quobyte: description: quobyte represents a Quobyte mount on the host @@ -2960,6 +2994,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' @@ -2999,6 +3034,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string type: object + x-kubernetes-map-type: atomic sslEnabled: description: sslEnabled Flag enable/disable SSL communication with Gateway, default false @@ -3114,6 +3150,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeName: description: volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within diff --git a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml new file mode 100644 index 000000000..90fc22331 --- /dev/null +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -0,0 +1,287 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: functionrevisions.pkg.crossplane.io +spec: + group: pkg.crossplane.io + names: + categories: + - crossplane + - pkgrev + kind: FunctionRevision + listKind: FunctionRevisionList + plural: functionrevisions + singular: functionrevision + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Healthy')].status + name: HEALTHY + type: string + - jsonPath: .spec.revision + name: REVISION + type: string + - jsonPath: .spec.image + name: IMAGE + type: string + - jsonPath: .spec.desiredState + name: STATE + type: string + - jsonPath: .status.foundDependencies + name: DEP-FOUND + type: string + - jsonPath: .status.installedDependencies + name: DEP-INSTALLED + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: A FunctionRevision that has been added to Crossplane. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + endpoint: + description: Endpoint is the gRPC endpoint where Crossplane will send + RunFunctionRequests. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PackageRevisionSpec specifies the desired state of a PackageRevision. + properties: + commonLabels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize + and categorize (scope and select) objects. May match selectors of + replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + controllerConfigRef: + description: ControllerConfigRef references a ControllerConfig resource + that will be used to configure the packaged controller Deployment. + properties: + name: + description: Name of the ControllerConfig. + type: string + required: + - name + type: object + desiredState: + description: DesiredState of the PackageRevision. Can be either Active + or Inactive. 
+ type: string + essTLSSecretName: + description: ESSTLSSecretName is the secret name of the TLS certificates + that will be used by the provider for External Secret Stores. + type: string + ignoreCrossplaneConstraints: + default: false + description: IgnoreCrossplaneConstraints indicates to the package + manager whether to honor Crossplane version constrains specified + by the package. Default is false. + type: boolean + image: + description: Package image used by install Pod to extract package + contents. + type: string + packagePullPolicy: + default: IfNotPresent + description: PackagePullPolicy defines the pull policy for the package. + It is also applied to any images pulled for the package, such as + a provider's controller image. Default is IfNotPresent. + type: string + packagePullSecrets: + description: PackagePullSecrets are named secrets in the same namespace + that can be used to fetch packages from private registries. They + are also applied to any images pulled for the package, such as a + provider's controller image. + items: + description: LocalObjectReference contains enough information to + let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + revision: + description: Revision number. Indicates when the revision will be + garbage collected based on the parent's RevisionHistoryLimit. + format: int64 + type: integer + skipDependencyResolution: + default: false + description: SkipDependencyResolution indicates to the package manager + whether to skip resolving dependencies for a package. Setting this + value to true may have unintended consequences. Default is false. + type: boolean + webhookTLSSecretName: + description: WebhookTLSSecretName is the name of the TLS Secret that + will be used by the provider to serve a TLS-enabled webhook server. + The certificate will be injected to webhook configurations as well + as CRD conversion webhook strategy if needed. If it's not given, + provider will not have a certificate mounted to its filesystem, + webhook configurations won't be deployed and if there is a CRD with + webhook conversion strategy, the installation will fail. + type: string + required: + - desiredState + - image + - revision + type: object + status: + description: PackageRevisionStatus represents the observed state of a + PackageRevision. + properties: + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + controllerRef: + description: ControllerRef references the controller (e.g. Deployment), + if any, that is responsible for reconciling the objects this package + revision installed. + properties: + name: + description: Name of the controller. + type: string + required: + - name + type: object + foundDependencies: + description: Dependency information. + format: int64 + type: integer + installedDependencies: + format: int64 + type: integer + invalidDependencies: + format: int64 + type: integer + objectRefs: + description: References to objects owned by PackageRevision. + items: + description: A TypedReference refers to an object by Name, Kind, + and APIVersion. It is commonly used to reference cluster-scoped + objects or objects where the namespace is already known. + properties: + apiVersion: + description: APIVersion of the referenced object. + type: string + kind: + description: Kind of the referenced object. + type: string + name: + description: Name of the referenced object. + type: string + uid: + description: UID of the referenced object. + type: string + required: + - apiVersion + - kind + - name + type: object + type: array + permissionRequests: + description: PermissionRequests made by this package. The package + declares that its controller needs these permissions to run. The + RBAC manager is responsible for granting them. + items: + description: PolicyRule holds information that describes a policy + rule, but does not contain information about who the rule applies + to or which namespace the rule applies to. + properties: + apiGroups: + description: APIGroups is the name of the APIGroup that contains + the resources. If multiple API groups are specified, any + action requested against one of the enumerated resources in + any API group will be allowed. "" represents the core API + group and "*" represents all API groups. + items: + type: string + type: array + nonResourceURLs: + description: NonResourceURLs is a set of partial urls that a + user should have access to. *s are allowed, but only as the + full, final step in the path Since non-resource URLs are not + namespaced, this field is only applicable for ClusterRoles + referenced from a ClusterRoleBinding. Rules can either apply + to API resources (such as "pods" or "secrets") or non-resource + URL paths (such as "/api"), but not both. + items: + type: string + type: array + resourceNames: + description: ResourceNames is an optional white list of names + that the rule applies to. An empty set means that everything + is allowed. + items: + type: string + type: array + resources: + description: Resources is a list of resources this rule applies + to. '*' represents all resources. + items: + type: string + type: array + verbs: + description: Verbs is a list of Verbs that apply to ALL the + ResourceKinds contained in this rule. '*' represents all verbs. 
+ items: + type: string + type: array + required: + - verbs + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/cluster/crds/pkg.crossplane.io_functions.yaml b/cluster/crds/pkg.crossplane.io_functions.yaml new file mode 100644 index 000000000..4c154d86d --- /dev/null +++ b/cluster/crds/pkg.crossplane.io_functions.yaml @@ -0,0 +1,171 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: functions.pkg.crossplane.io +spec: + group: pkg.crossplane.io + names: + categories: + - crossplane + - pkg + kind: Function + listKind: FunctionList + plural: functions + singular: function + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Installed')].status + name: INSTALLED + type: string + - jsonPath: .status.conditions[?(@.type=='Healthy')].status + name: HEALTHY + type: string + - jsonPath: .spec.package + name: PACKAGE + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Function is the CRD type for a request to deploy a long-running + Function. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FunctionSpec specifies the configuration of a Function. + properties: + commonLabels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize + and categorize (scope and select) objects. May match selectors of + replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + ignoreCrossplaneConstraints: + default: false + description: IgnoreCrossplaneConstraints indicates to the package + manager whether to honor Crossplane version constrains specified + by the package. Default is false. + type: boolean + package: + description: Package is the name of the package that is being requested. + type: string + packagePullPolicy: + default: IfNotPresent + description: PackagePullPolicy defines the pull policy for the package. + Default is IfNotPresent. + type: string + packagePullSecrets: + description: PackagePullSecrets are named secrets in the same namespace + that can be used to fetch packages from private registries. + items: + description: LocalObjectReference contains enough information to + let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + x-kubernetes-map-type: atomic + type: array + revisionActivationPolicy: + default: Automatic + description: RevisionActivationPolicy specifies how the package controller + should update from one revision to the next. Options are Automatic + or Manual. Default is Automatic. + type: string + revisionHistoryLimit: + default: 1 + description: RevisionHistoryLimit dictates how the package controller + cleans up old inactive package revisions. Defaults to 1. Can be + disabled by explicitly setting to 0. + format: int64 + type: integer + skipDependencyResolution: + default: false + description: SkipDependencyResolution indicates to the package manager + whether to skip resolving dependencies for a package. Setting this + value to true may have unintended consequences. Default is false. + type: boolean + required: + - package + type: object + status: + description: FunctionStatus represents the observed state of a Function. + properties: + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + currentIdentifier: + description: CurrentIdentifier is the most recent package source that + was used to produce a revision. The package manager uses this field + to determine whether to check for package updates for a given source + when packagePullPolicy is set to IfNotPresent. Manually removing + this field will cause the package manager to check that the current + revision is correct for the given package source. + type: string + currentRevision: + description: CurrentRevision is the name of the current package revision. + It will reflect the most up to date revision, whether it has been + activated or not. + type: string + endpoint: + description: Endpoint is the gRPC endpoint where Crossplane will send + RunFunctionRequests. 
+ type: string + type: object + required: + - spec + - status + type: object + served: true + storage: true + subresources: + status: {} diff --git a/cluster/crds/pkg.crossplane.io_locks.yaml b/cluster/crds/pkg.crossplane.io_locks.yaml index 3e651c690..35b9bdce5 100644 --- a/cluster/crds/pkg.crossplane.io_locks.yaml +++ b/cluster/crds/pkg.crossplane.io_locks.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: locks.pkg.crossplane.io spec: group: pkg.crossplane.io diff --git a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml index 2bce681e7..09ed346da 100644 --- a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: providerrevisions.pkg.crossplane.io spec: group: pkg.crossplane.io diff --git a/cluster/crds/pkg.crossplane.io_providers.yaml b/cluster/crds/pkg.crossplane.io_providers.yaml index 24aec830c..3c688ed08 100644 --- a/cluster/crds/pkg.crossplane.io_providers.yaml +++ b/cluster/crds/pkg.crossplane.io_providers.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: providers.pkg.crossplane.io spec: group: pkg.crossplane.io diff --git a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml index b25f5a857..8ac52a44a 100644 --- a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml +++ b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: storeconfigs.secrets.crossplane.io spec: group: secrets.crossplane.io diff --git a/cluster/images/xfn/Dockerfile b/cluster/images/xfn/Dockerfile index 47325069d..16dfce7ac 100644 --- a/cluster/images/xfn/Dockerfile +++ b/cluster/images/xfn/Dockerfile @@ -1,5 +1,5 @@ # This is debian:bookworm-slim (i.e. 
Debian 12, testing) -FROM debian:bookworm-slim@sha256:d8f9d38c21495b04d1cca99805fbb383856e19794265684019bf193c3b7d67f9 +FROM debian:bookworm-slim@sha256:9bd077d2f77c754f4f7f5ee9e6ded9ff1dff92c6dce877754da21b917c122c77 ARG TARGETOS ARG TARGETARCH diff --git a/cluster/kustomization.yaml b/cluster/kustomization.yaml index 95ac09ad3..201efc3bc 100644 --- a/cluster/kustomization.yaml +++ b/cluster/kustomization.yaml @@ -8,6 +8,8 @@ resources: - crds/pkg.crossplane.io_configurationrevisions.yaml - crds/pkg.crossplane.io_configurations.yaml - crds/pkg.crossplane.io_controllerconfigs.yaml +- crds/pkg.crossplane.io_functionrevisions.yaml +- crds/pkg.crossplane.io_functions.yaml - crds/pkg.crossplane.io_locks.yaml - crds/pkg.crossplane.io_providerrevisions.yaml - crds/pkg.crossplane.io_providers.yaml diff --git a/cluster/meta/meta.pkg.crossplane.io_configurations.yaml b/cluster/meta/meta.pkg.crossplane.io_configurations.yaml index 08287e50b..0c9ef7f7f 100644 --- a/cluster/meta/meta.pkg.crossplane.io_configurations.yaml +++ b/cluster/meta/meta.pkg.crossplane.io_configurations.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: configurations.meta.pkg.crossplane.io spec: group: meta.pkg.crossplane.io diff --git a/cluster/meta/meta.pkg.crossplane.io_functions.yaml b/cluster/meta/meta.pkg.crossplane.io_functions.yaml new file mode 100644 index 000000000..2d197bd5d --- /dev/null +++ b/cluster/meta/meta.pkg.crossplane.io_functions.yaml @@ -0,0 +1,74 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: functions.meta.pkg.crossplane.io +spec: + group: meta.pkg.crossplane.io + names: + kind: Function + listKind: FunctionList + plural: functions + singular: function + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: A Function is the description of a Crossplane Function package. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FunctionSpec specifies the configuration of a Function. + properties: + crossplane: + description: Semantic version constraints of Crossplane that package + is compatible with. + properties: + version: + description: Semantic version constraints of Crossplane that package + is compatible with. + type: string + required: + - version + type: object + dependsOn: + description: Dependencies on other packages. + items: + description: Dependency is a dependency on another package. One + of Provider or Configuration may be supplied. + properties: + configuration: + description: Configuration is the name of a Configuration package + image. 
+ type: string + provider: + description: Provider is the name of a Provider package image. + type: string + version: + description: Version is the semantic version constraints of + the dependency image. + type: string + required: + - version + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/cluster/meta/meta.pkg.crossplane.io_providers.yaml b/cluster/meta/meta.pkg.crossplane.io_providers.yaml index 1040e7af7..c944df06f 100644 --- a/cluster/meta/meta.pkg.crossplane.io_providers.yaml +++ b/cluster/meta/meta.pkg.crossplane.io_providers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.12.1 name: providers.meta.pkg.crossplane.io spec: group: meta.pkg.crossplane.io diff --git a/cmd/crossplane/core/core.go b/cmd/crossplane/core/core.go index a78a33130..e930264d1 100644 --- a/cmd/crossplane/core/core.go +++ b/cmd/crossplane/core/core.go @@ -47,6 +47,7 @@ import ( pkgcontroller "github.com/crossplane/crossplane/internal/controller/pkg/controller" "github.com/crossplane/crossplane/internal/features" "github.com/crossplane/crossplane/internal/initializer" + "github.com/crossplane/crossplane/internal/oci" "github.com/crossplane/crossplane/internal/transport" "github.com/crossplane/crossplane/internal/validation/apiextensions/v1/composition" "github.com/crossplane/crossplane/internal/xpkg" @@ -225,6 +226,7 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli Options: o, Namespace: c.Namespace, ServiceAccount: c.ServiceAccount, + Registry: c.Registry, } if err := apiextensions.Setup(mgr, ao); err != nil { @@ -243,7 +245,7 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli } if c.CABundlePath != "" { - rootCAs, err := xpkg.ParseCertificatesFromPath(c.CABundlePath) + rootCAs, err := oci.ParseCertificatesFromPath(c.CABundlePath) if err != nil { return errors.Wrap(err, "Cannot parse CA bundle") } diff --git a/cmd/crossplane/core/init.go b/cmd/crossplane/core/init.go index 2f062c897..d6be87f8c 100644 --- a/cmd/crossplane/core/init.go +++ b/cmd/crossplane/core/init.go @@ -70,10 +70,16 @@ func (c *initCommand) Run(s *runtime.Scheme, log logging.Logger) error { steps = append(steps, initializer.NewWebhookCertificateGenerator(nn, c.Namespace, log.WithValues("Step", "WebhookCertificateGenerator")), + initializer.NewCoreCRDsMigrator("compositionrevisions.apiextensions.crossplane.io", "v1alpha1"), + initializer.NewCoreCRDsMigrator("locks.pkg.crossplane.io", "v1alpha1"), initializer.NewCoreCRDs("/crds", s, initializer.WithWebhookTLSSecretRef(nn)), initializer.NewWebhookConfigurations("/webhookconfigurations", s, nn, svc)) } else { - steps = append(steps, initializer.NewCoreCRDs("/crds", s)) + steps = append(steps, + initializer.NewCoreCRDsMigrator("compositionrevisions.apiextensions.crossplane.io", "v1alpha1"), + initializer.NewCoreCRDsMigrator("locks.pkg.crossplane.io", "v1alpha1"), + initializer.NewCoreCRDs("/crds", s), + ) } if c.ESSTLSClientSecretName != "" && c.ESSTLSServerSecretName != "" { diff --git a/cmd/crossplane/main.go b/cmd/crossplane/main.go index 3c6b05153..6eab7c47f 100644 --- a/cmd/crossplane/main.go +++ b/cmd/crossplane/main.go @@ -19,6 +19,7 @@ package main import ( "fmt" + "io" "github.com/alecthomas/kong" admv1 "k8s.io/api/admissionregistration/v1" @@ -78,7 +79,11 @@ func (v versionFlag) BeforeApply(app 
*kong.Kong) error { //nolint:unparam // Bef func main() { zl := zap.New().WithName("crossplane") - + // Setting the controller-runtime logger to a no-op logger by default, + // unless debug mode is enabled. This is because the controller-runtime + // logger is *very* verbose even at info level. This is not really needed, + // but otherwise we get a warning from the controller-runtime. + ctrl.SetLogger(zap.New(zap.WriteTo(io.Discard))) // Note that the controller managers scheme must be a superset of the // package manager's object scheme; it must contain all object types that // may appear in a Crossplane package. This is because the package manager diff --git a/cmd/xfn/main.go b/cmd/xfn/main.go index 0f1229431..25fab67fb 100644 --- a/cmd/xfn/main.go +++ b/cmd/xfn/main.go @@ -21,6 +21,7 @@ import ( "fmt" "github.com/alecthomas/kong" + "github.com/google/go-containerregistry/pkg/name" "sigs.k8s.io/controller-runtime/pkg/log/zap" "github.com/crossplane/crossplane-runtime/pkg/logging" @@ -34,10 +35,17 @@ import ( type debugFlag bool type versionFlag bool +// KongVars represent the kong variables associated with the CLI parser +// required for the Registry default variable interpolation. +var KongVars = kong.Vars{ + "default_registry": name.DefaultRegistry, +} + var cli struct { Debug debugFlag `short:"d" help:"Print verbose logging statements."` - Version versionFlag `short:"v" help:"Print version and quit."` + Version versionFlag `short:"v" help:"Print version and quit."` + Registry string `short:"r" help:"Default registry used to fetch containers when not specified in tag." default:"${default_registry}" env:"REGISTRY"` Start start.Command `cmd:"" help:"Start listening for Composition Function runs over gRPC." default:"1"` Run run.Command `cmd:"" help:"Run a Composition Function."` @@ -69,6 +77,7 @@ func main() { kong.Description("Crossplane Composition Functions."), kong.BindTo(logging.NewLogrLogger(zl), (*logging.Logger)(nil)), kong.UsageOnError(), + KongVars, ) - ctx.FatalIfErrorf(ctx.Run()) + ctx.FatalIfErrorf(ctx.Run(&start.Args{Registry: cli.Registry})) } diff --git a/cmd/xfn/run/run.go b/cmd/xfn/run/run.go index c2699bf58..e1efae90e 100644 --- a/cmd/xfn/run/run.go +++ b/cmd/xfn/run/run.go @@ -30,6 +30,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1alpha1" + "github.com/crossplane/crossplane/cmd/xfn/start" "github.com/crossplane/crossplane/internal/xfn" ) @@ -59,7 +60,7 @@ type Command struct { } // Run a Composition container function. -func (c *Command) Run() error { +func (c *Command) Run(args *start.Args) error { // If we don't have CAP_SETUID or CAP_SETGID, we'll only be able to map our // own UID and GID to root inside the user namespace. 
rootUID := os.Getuid() @@ -70,7 +71,7 @@ func (c *Command) Run() error { rootGID = c.MapRootGID } - ref, err := name.ParseReference(c.Image) + ref, err := name.ParseReference(c.Image, name.WithDefaultRegistry(args.Registry)) if err != nil { return errors.Wrap(err, errParseImage) } @@ -90,7 +91,7 @@ func (c *Command) Run() error { return errors.Wrap(err, errAuthCfg) } - f := xfn.NewContainerRunner(xfn.SetUID(setuid), xfn.MapToRoot(rootUID, rootGID), xfn.WithCacheDir(filepath.Clean(c.CacheDir))) + f := xfn.NewContainerRunner(xfn.SetUID(setuid), xfn.MapToRoot(rootUID, rootGID), xfn.WithCacheDir(filepath.Clean(c.CacheDir)), xfn.WithRegistry(args.Registry)) rsp, err := f.RunFunction(context.Background(), &v1alpha1.RunFunctionRequest{ Image: c.Image, Input: c.FunctionIO, diff --git a/cmd/xfn/spark/spark.go b/cmd/xfn/spark/spark.go index 32c4f70b7..09b9f043f 100644 --- a/cmd/xfn/spark/spark.go +++ b/cmd/xfn/spark/spark.go @@ -35,6 +35,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1alpha1" + "github.com/crossplane/crossplane/cmd/xfn/start" "github.com/crossplane/crossplane/internal/oci" "github.com/crossplane/crossplane/internal/oci/spec" "github.com/crossplane/crossplane/internal/oci/store" @@ -74,12 +75,13 @@ type Command struct { CacheDir string `short:"c" help:"Directory used for caching function images and containers." default:"/xfn"` Runtime string `help:"OCI runtime binary to invoke." default:"crun"` MaxStdioBytes int64 `help:"Maximum size of stdout and stderr for functions." default:"0"` + CABundlePath string `help:"Additional CA bundle to use when fetching function images from registry." env:"CA_BUNDLE_PATH"` } // Run a Composition Function inside an unprivileged user namespace. Reads a // protocol buffer serialized RunFunctionRequest from stdin, and writes a // protocol buffer serialized RunFunctionResponse to stdout. -func (c *Command) Run() error { //nolint:gocyclo // TODO(negz): Refactor some of this out into functions, add tests. +func (c *Command) Run(args *start.Args) error { //nolint:gocyclo // TODO(negz): Refactor some of this out into functions, add tests. pb, err := io.ReadAll(os.Stdin) if err != nil { return errors.Wrap(err, errReadRequest) @@ -121,17 +123,25 @@ func (c *Command) Run() error { //nolint:gocyclo // TODO(negz): Refactor some of return errors.Wrap(err, errNewDigestStore) } - r, err := name.ParseReference(req.GetImage()) + r, err := name.ParseReference(req.GetImage(), name.WithDefaultRegistry(args.Registry)) if err != nil { return errors.Wrap(err, errParseRef) } + opts := []oci.ImageClientOption{FromImagePullConfig(req.GetImagePullConfig())} + if c.CABundlePath != "" { + rootCA, err := oci.ParseCertificatesFromPath(c.CABundlePath) + if err != nil { + return errors.Wrap(err, "Cannot parse CA bundle") + } + opts = append(opts, oci.WithCustomCA(rootCA)) + } // We cache every image we pull to the filesystem. Layers are cached as // uncompressed tarballs. This allows them to be extracted quickly when // using the uncompressed.Bundler, which extracts a new root filesystem for // every container run. p := oci.NewCachingPuller(h, store.NewImage(c.CacheDir), &oci.RemoteClient{}) - img, err := p.Image(ctx, r, FromImagePullConfig(req.GetImagePullConfig())) + img, err := p.Image(ctx, r, opts...) 
if err != nil { return errors.Wrap(err, errPull) } diff --git a/cmd/xfn/start/start.go b/cmd/xfn/start/start.go index 3b6e35bf6..921335df8 100644 --- a/cmd/xfn/start/start.go +++ b/cmd/xfn/start/start.go @@ -33,6 +33,11 @@ const ( errListenAndServe = "cannot listen for and serve gRPC API" ) +// Args contains the default registry used to pull XFN containers. +type Args struct { + Registry string +} + // Command starts a gRPC API to run Composition Functions. type Command struct { CacheDir string `short:"c" help:"Directory used for caching function images and containers." default:"/xfn"` @@ -43,7 +48,7 @@ type Command struct { } // Run a Composition Function gRPC API. -func (c *Command) Run(log logging.Logger) error { +func (c *Command) Run(args *Args, log logging.Logger) error { // If we don't have CAP_SETUID or CAP_SETGID, we'll only be able to map our // own UID and GID to root inside the user namespace. rootUID := os.Getuid() @@ -59,6 +64,7 @@ func (c *Command) Run(log logging.Logger) error { xfn.SetUID(setuid), xfn.MapToRoot(rootUID, rootGID), xfn.WithCacheDir(filepath.Clean(c.CacheDir)), - xfn.WithLogger(log)) + xfn.WithLogger(log), + xfn.WithRegistry(args.Registry)) return errors.Wrap(f.ListenAndServe(c.Network, c.Address), errListenAndServe) } diff --git a/contributing/guide-observability.md b/contributing/guide-observability.md index 9343d0bc7..9c5e077f2 100644 --- a/contributing/guide-observability.md +++ b/contributing/guide-observability.md @@ -50,6 +50,9 @@ Events should be recorded in the following cases: * The state of a resource is changed * An error occurs +Events should *not* be recorded if nothing happens or changes, with the exception +of repeated errors. + The events recorded in these cases can be thought of as forming an event log of things that happen for the resources that Crossplane manages. Each event should refer back to the relevant controller and resource, and use other fields of the @@ -120,6 +123,86 @@ logs don't show up in production normally. For the question of what constitutes an error, errors should be actionable by a human. See the [Dave Cheney article] on this topic for some more discussion. +## Good Errors + +An error message is good if it communicates + +1. what went wrong +2. where it went wrong +3. for which inputs + +and if it is concise without redundancies and at best displayable in one line. + +The general goal of an error message is to be actionable for the user who is +trying to avoid the error in the future. + +Examples for bad error messages: +- `cannot resolve package dependencies: Invalid Semantic Version` +- `cannot parse file foo.yaml: failed to parse YAML of file foo.yaml` + +Examples for how to improve them: +- `cannot resolve package dependencies in crossplane.yaml: invalid Semantic Version "main" on package "provider-kubernetes"` +- `failed to load OCI image provider-gcp/v0.42.0: failed to unpack layer: cannot parse file: failed to parse YAML of file foo.yaml: unexpected indention in line 4`. + +General rules: +1. follow the pattern `generic error cause: more details: even more details`. +2. put values into quotes (`%q` in Golang), with the following exceptions + where quotes don't add to readability: + - resource names with namespace or resource type (e.g. `default/foo` or + `providers/foo`), resource names alone are quoted + - kinds (e.g. `cannot find provider "provider-kubernetes"`; here, the kind + provider is without quotes, the provider name is with) + - filenames with extensions and/or path (e.g. `cannot parse foo.yaml`). +3. 
add a context of e.g. filenames, object names, or values which are wrong + (and the values are known to be insensitive). + + Which context is added by which functions depends on the level of + abstraction of the code at hand: the package loader will know the OCI + image name, the YAML parser might only see a byte slice. The former adds + the image name as context, some layer in-between adds the filename and + the YAML parser adds the line and column in the byte stream. + + Filenames should be relative to the logical working directory of the + context at hand (e.g. a package root, current working directory, + workspace root). Ensure that the context is clear in the error message (e.g. + `failed to load package "provider-kubernetes": failed to parse apis/xrd.yaml`). + +4. add a context just once. + + Rule of thumb: if a function gets a parameter, it will also include it in the + error messages, so the caller does not have to. + + E.g. a function getting a filename as input will include the filename in + errors it returns. +5. don't return multi-line errors. +6. aggregate errors if there is clear use for the user, but not in general, i.e. + fail fast is the default. + + Aggregate through `kerrors.NewAggregate`. When the number of aggregated + errors is unbounded, return a subset, e.g. the first 3. + + E.g.: `cannot parse workspace: failed to parse foo.yaml, bar.yaml, abc.yaml, + and 17 more files` +7. don't include sensitive information like tokens into error messages, not even + in debug mode. + + These are usually considered insensitive: field names, file paths, resources, + kinds, object names, namespaces, labels, enum values, numbers, booleans. +8. use error message constants, e.g. `errSomethingWentWrong` for messages + without interpolation, and `errFmtSomethingWentWrong` for those with + interpolation. +9. use [wrapping errors] and github.com/crossplane/crossplane-runtime/pkg/errors + in general. +10. error messages must be deterministic. Use sorting in loops over maps or sort + returned errors to ensure determinism. + + Background: one never knows how an error is displayed. Not being + deterministic can lead to hot-loops with constant API updates, e.g. of a + condition. + +11. error messages are good candidates to be included in condition or event + messages. + ## In Practice Crossplane provides two observability libraries as part of crossplane-runtime: @@ -187,5 +270,5 @@ implementations. 
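As a rough illustration of rules 2, 4, 8 and 9 above, here is a small Go sketch using the crossplane-runtime errors package. The function and file names are made up for the example; it is not code from the Crossplane tree.

```go
// A sketch of the error guidelines: message constants, %q for values, context
// added exactly once per layer, and wrapping via crossplane-runtime's errors.
package main

import (
	"fmt"
	"os"

	"github.com/crossplane/crossplane-runtime/pkg/errors"
	"sigs.k8s.io/yaml"
)

const (
	// Messages with interpolation get an errFmt prefix.
	errFmtParseFile = "cannot parse %s"
	errFmtLoadPkg   = "cannot load package %q"
)

// parseFile adds the filename it was given to its errors, so callers should
// not repeat it (add a context just once).
func parseFile(path string) (map[string]interface{}, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		return nil, errors.Wrapf(err, errFmtParseFile, path)
	}
	out := map[string]interface{}{}
	return out, errors.Wrapf(yaml.Unmarshal(b, &out), errFmtParseFile, path)
}

// loadPackage adds only the context this layer knows about: the package name.
func loadPackage(name, path string) error {
	_, err := parseFile(path)
	return errors.Wrapf(err, errFmtLoadPkg, name)
}

func main() {
	if err := loadPackage("provider-kubernetes", "apis/xrd.yaml"); err != nil {
		// e.g. cannot load package "provider-kubernetes": cannot parse apis/xrd.yaml: ...
		fmt.Println(err)
	}
}
```

Printed, the outer error follows the `generic error cause: more details: even more details` pattern and stays on one line.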
[not]: https://dave.cheney.net/2017/01/23/the-package-level-logger-anti-pattern [`Reconciler`]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler [managed resource reconciler]: https://github.com/crossplane/crossplane-runtime/blob/a6bb0/pkg/reconciler/managed/reconciler.go#L436 -[wrapping errors]: https://godoc.org/github.com/pkg/errors#Wrap +[wrapping errors]: https://godoc.org/github.com/crossplane/crossplane-runtime/pkg/errors#Wrap [API conventions]: https://github.com/kubernetes/community/blob/09f55c6/contributors/devel/sig-architecture/api-conventions.md#events diff --git a/design/assets/design-doc-composition-functions/containerized-functions.png b/design/assets/design-doc-composition-functions/containerized-functions.png new file mode 100644 index 000000000..819b348f6 Binary files /dev/null and b/design/assets/design-doc-composition-functions/containerized-functions.png differ diff --git a/design/assets/design-doc-composition-functions/functions.png b/design/assets/design-doc-composition-functions/functions.png new file mode 100644 index 000000000..c4f1bc4aa Binary files /dev/null and b/design/assets/design-doc-composition-functions/functions.png differ diff --git a/design/defunct/design-doc-composition-functions.md b/design/defunct/design-doc-composition-functions.md new file mode 100644 index 000000000..1ca91d9b7 --- /dev/null +++ b/design/defunct/design-doc-composition-functions.md @@ -0,0 +1,778 @@ + # Composition Functions + +* Owners: Nic Cope (@negz), Sergen Yalçın (@sergenyalcin) +* Reviewers: Crossplane Maintainers +* Status: Defunct + +## Background + +Crossplane is a framework for building cloud native control planes. These +control planes sit one level above the cloud providers and allow you to +customize the APIs they expose. Platform teams use Crossplane to offer the +developers they support simpler, safer, self-service interfaces to the cloud. + +To build a control plane with Crossplane you: + +1. Define the APIs you’d like your control plane to expose. +1. Extend Crossplane with support for orchestrating external resources (e.g. + AWS). +1. Configure which external resources to orchestrate when someone calls your + APIs. + +Crossplane offers a handful of extension points that layer atop each other to +help make this possible: + +* Providers extend Crossplane with Managed Resources (MRs), which are high + fidelity, declarative representations of external APIs. Crossplane reconciles + MRs by orchestrating an external system (e.g. AWS). +* Configurations extend Crossplane with Composite Resources (XRs), which are + essentially arbitrary APIs, and Compositions. Crossplane reconciles XRs by + orchestrating MRs. Compositions teach Crossplane how to do this. + +The functionality enabled by XRs and Compositions is typically referred to as +simply Composition. Support for Composition was added in Crossplane [v0.10.0] +(April 2020). From our [terminology documentation][term-composition]: + +> Folks accustomed to Terraform might think of a Composition as a Terraform +> module; the HCL code that describes how to take input variables and use them +> to create resources in some cloud API. Folks accustomed to Helm might think of +> a Composition as a Helm chart’s templates; the moustache templated YAML files +> that describe how to take Helm chart values and render Kubernetes resources. + +A Crossplane `Composition` consists of an array of one or more 'base' +resources. Each of these resources can be 'patched' with values derived from the +XR. 
The functionality enabled by a `Composition` is intentionally limited - for +example there is no support for conditionals (e.g. only create this resource if +the following conditions are met) or iteration (e.g. create N of the following +resource, where N is derived from an XR field). + +Below is an example `Composition`: + +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: example +spec: + compositeTypeRef: + apiVersion: database.example.org/v1alpha1 + kind: AcmeCoDatabase + resources: + - name: cloudsqlinstance + base: + apiVersion: database.gcp.crossplane.io/v1beta1 + kind: CloudSQLInstance + spec: + forProvider: + databaseVersion: POSTGRES_9_6 + region: us-central1 + settings: + tier: db-custom-1-3840 + dataDiskType: PD_SSD + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.parameters.storageGB + toFieldPath: spec.forProvider.settings.dataDiskSizeGb +``` + +The goals of the Crossplane maintainers in designing Composition were to: + +* Empower platform teams to provide a platform of useful, opinionated + abstractions. +* Enable platform teams to define abstractions that may be portable across + different infrastructure providers and application runtimes. +* Enable platform teams to share and reuse the abstractions they define. +* Leverage the Kubernetes Resource Model (KRM) to model applications, + infrastructure, and the product of the two in a predictable, safe, and + declarative fashion using low or no code. +* Avoid imposing unnecessary opinions, assumptions, or constraints around how + applications and infrastructure should function. + +The design document for Composition [captures these goals][composition-design] +using somewhat dated parlance. + +Our approach to achieving our goals was heavily informed by Brian Grant’s +[Declarative application management in Kubernetes][declarative-app-management]. +Brian’s document is an excellent summary of the gotchas and pitfalls faced by +those attempting to design new configuration management tools, informed by his +experiences designing Kubernetes, its precursor Borg, and many generations of +configuration languages at Google, including [BCL/GCL][bcl]. In particular, we +wanted to: + +* Avoid organically growing a new DSL. These languages tend to devolve to + incoherency as stakeholders push to “bolt on” new functionality to solve + pressing problems at the expense of measured design. Terraform’s DSL + unintuitively [supporting the count argument in some places but not + others][terraform-count] is a great example of this. Inventing a new DSL also + comes with the cost of inventing new tooling to test, lint, generate, etc, + your DSL. +* Stick to configuration that could be modeled as a REST API. Modeling + Composition logic as a schemafied API resource makes it possible for + Crossplane to validate that logic and provide feedback to the platform team at + configuration time. It also greatly increases interoperability thanks to broad + support across tools and languages for interacting with REST APIs. + +It was also important to avoid the “worst of both worlds” - i.e. growing a fully +featured ([Turing-complete][turing-complete]) DSL modeled as a REST API. To this +end we omitted common language features such as conditionals and iteration. Our +rationale being that these features were better deferred to a General Purpose +Programming Language (GPL) designed by language experts, and with extensive +existing tooling. 
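To make the `FromCompositeFieldPath` patch in the example above concrete, the following Go sketch shows roughly what such a patch does, using the `fieldpath` helpers from crossplane-runtime. It is an illustration only, not the actual composition engine code, and the maps stand in for the XR and composed resource shown earlier.

```go
// Roughly what a FromCompositeFieldPath patch does: copy a value from one
// field path on the XR to another field path on a composed resource.
package main

import (
	"fmt"

	"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
)

func main() {
	// A simplified composite resource (XR).
	xr := map[string]interface{}{
		"spec": map[string]interface{}{
			"parameters": map[string]interface{}{"storageGB": int64(20)},
		},
	}

	// A simplified composed resource, rendered from the 'base' template.
	csql := map[string]interface{}{
		"spec": map[string]interface{}{
			"forProvider": map[string]interface{}{
				"settings": map[string]interface{}{"dataDiskType": "PD_SSD"},
			},
		},
	}

	// Read the value from the XR...
	v, err := fieldpath.Pave(xr).GetValue("spec.parameters.storageGB")
	if err != nil {
		panic(err)
	}

	// ...and write it to the composed resource.
	if err := fieldpath.Pave(csql).SetValue("spec.forProvider.settings.dataDiskSizeGb", v); err != nil {
		panic(err)
	}

	fmt.Println(csql)
}
```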
+ +Since its inception the Crossplane maintainers’ vision has been that there +should essentially be two variants of Composition: + +* For simple cases, use contemporary "Patch and Transform" (P&T) Composition. +* For advanced cases, bring your tool or programming language of choice. + +In this context a “simple case” might involve composing fewer than ten resources +without the need for logic such as conditionals and iteration. Note that the +Composition logic, whether P&T or deferred to a tool or programming language, is +always behind the API line (behind an XR). This means the distinction is only +important to the people authoring the Compositions, never to the people +consuming them. + +Offering two variants of Composition allows Crossplane users to pick the one +that is best aligned with their situation, preferences, and experience level. +For simple cases you don’t need to learn a new programming language or tool, and +there are no external dependencies - just write familiar, Kubernetes-style YAML. +For advanced cases leverage proven tools and languages with existing ecosystems +and documentation. Either way, Crossplane has no religion - if you prefer not to +“write YAML”, pick another tool and vice versa. + +## Goals + +The proposal put forward by this document should: + +* Let folks use their composition tool and/or programming language of choice. +* Support 'advanced' composition logic such as loops and conditionals. +* Balance safety (e.g. sandboxing) with speed and simplicity. +* Be possible to introduce behind a feature flag that is off by default. + +While not an explicit goal, it would also be ideal if the solution put forth by +this document could serve as a test bed for new features in the contemporary +'resources array' based form of Composition. + +The user experience around authoring and maintaining Composition Functions is +out of scope for this proposal, which focuses only on adding foundational +support for the feature to Crossplane. + +## Proposal + +### Overview + +This document proposes that a new `functions` array be added to the existing +`Composition` type. This array of functions would be called either instead of or +in addition to the existing `resources` array in order to determine how an XR +should be composed. The array of functions acts as a pipeline; the output of +each function is passed as the input to the next. The output of the final +function tells Crossplane what must be done to reconcile the XR. + +```yaml +apiVersion: apiextensions.crossplane.io/v2alpha1 +kind: Composition +metadata: + name: example +spec: + compositeTypeRef: + apiVersion: database.example.org/v1alpha1 + kind: XPostgreSQLInstance + functions: + - name: my-cool-function + type: Container + container: + image: xkpg.io/my-cool-function:0.1.0 +``` + +Under this proposal each function is the entrypoint of an OCI image, though the +API is designed to support different function implementations (such as webhooks) +in the future. The updated API would affect only the `Composition` type - no +changes would be required to the schema of `CompositeResourceDefinitions`, XRs, +etc. + +Notably the functions would not be responsible for interacting with the API +server to create, update, or delete composed resources. Instead, they instruct +Crossplane which resources should be created, updated, or deleted. 
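To give a feel for what such a function could look like, below is a minimal Go sketch of a `type: Container` function. It reads the `FunctionIO` document (described in the next section) from stdin, appends one composed resource to the accumulated desired state, and writes the result to stdout. The generic map handling and the composed resource it adds are purely illustrative; a real function would more likely use typed structs or a purpose-built SDK.

```go
// A minimal (illustrative, not official) 'type: Container' function.
package main

import (
	"io"
	"os"

	"sigs.k8s.io/yaml"
)

func main() {
	in, err := io.ReadAll(os.Stdin)
	if err != nil {
		panic(err)
	}

	// Parse the FunctionIO generically.
	fio := map[string]interface{}{}
	if err := yaml.Unmarshal(in, &fio); err != nil {
		panic(err)
	}

	// Append a desired composed resource to the accumulated desired state.
	desired, _ := fio["desired"].(map[string]interface{})
	if desired == nil {
		desired = map[string]interface{}{}
	}
	resources, _ := desired["resources"].([]interface{})
	resources = append(resources, map[string]interface{}{
		"name": "example-bucket", // Illustrative only.
		"resource": map[string]interface{}{
			"apiVersion": "storage.gcp.crossplane.io/v1alpha3",
			"kind":       "Bucket",
			"spec": map[string]interface{}{
				"forProvider": map[string]interface{}{"location": "US"},
			},
		},
	})
	desired["resources"] = resources
	fio["desired"] = desired

	// Everything observed is passed through unmodified; only desired state changes.
	out, err := yaml.Marshal(fio)
	if err != nil {
		panic(err)
	}
	os.Stdout.Write(out)
}
```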
+ +Under the proposed design functions could also be used for purposes besides +rendering composed resources, for example validating the results of the +`resources` array or earlier functions in the `functions` array. Furthermore, a +function could also be used to implement 'side effects' such as triggering a +replication or backup. + +Below is a more detailed example of an entry in the `functions` array. + +```yaml +apiVersion: apiextensions.crossplane.io/v2alpha1 +kind: Composition +metadata: + name: example +spec: + compositeTypeRef: + apiVersion: database.example.org/v1alpha1 + kind: XPostgreSQLInstance + functions: + - name: my-cool-function + type: Container + # Configuration specific to `type: Container` functions. + container: + # The OCI image to pull and run. + image: xkpg.io/my-cool-function:0.1.0 + # Whether to pull the function Never, Always, or IfNotPresent. + imagePullPolicy: IfNotPresent + # Secrets used to pull from a private registry. + imagePullSecrets: + - namespace: crossplane-system + name: my-xpkg-io-creds + # Note that only resource limits are supported - not requests. + # The function will be run with the specified resource limits. + resources: + limits: + memory: 64Mi + cpu: 250m + # Defaults to 'Isolated' - i.e an isolated network namespace. + network: Accessible + # How long the function may run before it's killed. Defaults to 10s. + timeout: 30s + # Containers are run by an external process listening at the supplied + # endpoint. Specifying an endpoint is optional; the endpoint defaults to + # the below value. + runner: + endpoint: unix:///@crossplane/fn/default.sock + # An x-kubernetes-embedded-resource RawExtension (i.e. an unschemafied + # Kubernetes resource). Passed to the function as the config block of its + # FunctionIO. + config: + apiVersion: database.example.org/v1alpha1 + kind: Config + metadata: + name: cloudsql + spec: + version: POSTGRES_9_6 +``` + +### Function API + +This document proposes that each function uses a `FunctionIO` type as its input +and output. In the case of `Container` functions this would correspond to stdin +and stdout. Crossplane would be responsible for reading stdout from the final +function and applying its changes to the relevant XR and composed resources. + +```yaml +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: FunctionIO +config: + apiVersion: database.example.org/v1alpha1 + kind: Config + metadata: + name: cloudsql + spec: + version: POSTGRES_9_6 +observed: + composite: + resource: + apiVersion: database.example.org/v1alpha1 + kind: XPostgreSQLInstance + metadata: + name: my-db + spec: + parameters: + storageGB: 20 + compositionSelector: + matchLabels: + provider: gcp + status: + conditions: + - type: Ready + status: True + connectionDetails: + - name: uri + value: postgresql://db.example.org:5432 +``` + +A `FunctionIO` resource consists of the following top-level fields: + +* The `apiVersion` and `kind` (required). +* A `config` object (optional). This is a [Kubernetes resource][rawextension] + with an arbitrary schema that may be used to provide additional configuration + to a function. For example a `render-helm-chart` function might use its + `config` to specify which Helm chart to render. Functions need not return + their `config`, and any mutations will be ignored. +* An `observed` object (required). This reflects the observed state of the XR, + any existing composed resources, and their connection details. Functions must + return the `observed` object unmodified. +* A `desired` object (optional). 
This reflects the accumulated desired state of + the XR and any composed resources. Functions may mutate the `desired` object. +* A `results` array (optional). Used to communicate information about the result + of a function, including warnings and errors. Functions may mutate the + `results` object. + +Each function takes its `config` (if any), `observed` state, and any previously +accumulated `desired` state as input, and optionally mutates the `desired` +state. This allows the output of one function to be the input to the next. + +The `observed` object consists of: + +* `observed.composite.resource`. The observed XR. +* `observed.composite.connectionDetails`: The observed XR connection details. +* `observed.resources[N].name`: The name of an observed composed resource. +* `observed.resources[N].resource`: An observed composed resource. +* `observed.resources[N].connectionDetails`: An observed composed resource's + current connection details. + +If an observed composed resource appears in the Composition's `spec.resources` +array their `name` fields will match. Note that the `name` field is distinct +from a composed resource's `metadata.name` - it is used to identify the resource +within a Composition and/or its function pipeline. + +The `desired` object consists of: + +* `desired.composite.resource`. The desired XR. +* `desired.composite.resource.connectionDetails`. Desired XR connection details. +* `desired.resources[N].name`. The name of a desired composed resource. +* `desired.resources[N].resource`. A desired composed resource. +* `desired.resources[N].connectionDetails`. A desired composed resource's + connection details. +* `desired.resources[N].readinessChecks`. A desired composed resource's + readiness checks. + +Note that the `desired.resources` array of the `FunctionIO` type is very +similar to the `spec.resources` array of the `Composition` type. In comparison: + +* `name` works the same across both types, but is required by `FunctionIO`. +* `connectionDetails` and `readinessChecks` work the same across both types. +* `FunctionIO` does not support `base` and `patches`. Instead, a function should + configure the `resource` field accordingly. + +The `desired` state is _accumulated_ across the Composition and all of its +functions. This means the first function may be passed desired state as +specified by the `spec.resources` array of a Composite, if any, and each +function must include the accumulated desired state in its output. Desired state +is treated as an overlay on observed state, so a function pipeline need not +specify the desired state of the XR (for example) unless a function wishes to +mutate it. + +A full `FunctionIO` specification will accompany the implementation. Some +example scenarios are illustrated below. + +A function that wanted to create (compose) a `CloudSQLInstance` would do so by +returning the following `FunctionIO`: + +```yaml +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: FunctionIO +observed: {} # Omitted for brevity. 
+desired: + resources: + - name: cloudsqlinstance + resource: + apiVersion: database.gcp.crossplane.io/v1beta1 + kind: CloudSQLInstance + spec: + forProvider: + databaseVersion: POSTGRES_9_6 + region: us-central1 + settings: + tier: db-custom-1-3840 + dataDiskType: PD_SSD + dataDiskSizeGb: 20 + writeConnectionSecretToRef: + namespace: crossplane-system + name: cloudsqlpostgresql-conn + connectionDetails: + - name: hostname + fromConnectionSecretKey: hostname + readinessChecks: + - type: None +``` + +A function that wanted to set only an XR connection detail could return: + +```yaml +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: FunctionIO +observed: {} # Omitted for brevity. +desired: + composite: + connectionDetails: + - type: FromValue + name: username + value: admin +``` + +A function wishing to delete a composed resource may do so by setting its +`resource` to null, for example: + +```yaml +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: FunctionIO +observed: {} # Omitted for brevity. +desired: + resources: + - name: cloudsqlinstance + resource: null +``` + +A function that could not complete successfully could do so by returning the +following `FunctionIO`: + +```yaml +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: FunctionIO +config: + apiVersion: database.example.org/v1alpha1 + kind: Config + metadata: + name: cloudsql + spec: + version: POSTGRES_9_6 +observed: {} # Omitted for brevity. +results: +- severity: Error + message: "Could not render Database.postgresql.crossplane.io/v1beta1` +``` + +### Running Container Function Pipelines + +While Crossplane typically runs in a Kubernetes cluster - a cluster designed to +run containers - running an ordered _pipeline_ of short-lived containers via +Kubernetes is much less straightforward than you might expect. Refer to +[Alternatives Considered](#alternatives-considered) for details. + +In order to provide flexibility and choice of tradeoffs in running containers +(e.g. speed, scalability, security) this document proposes Crossplane defer +containerized functions to an external runner. Communication with the runner +would occur via a gRPC API, with the runner expected to be listening at the +`endpoint` specified via the function's `runner` configuration block. This +endpoint would default to `unix:///@crossplane/fn/default.sock` - an abstract +[Unix domain socket][unix-domain-sockets]. + +Communication between Crossplane and a containerized function runner would use +the following API: + +```protobuf +syntax = "proto3"; + +// This service defines the APIs for a containerized function runner. +service ContainerizedFunctionRunner { + rpc RunFunction(RunFunctionRequest) returns (RunFunctionResponse) {} +} + +// Corresponds to Kubernetes' image pull policy. +enum ImagePullPolicy { + IF_NOT_PRESENT = 0; + ALWAYS = 1; + NEVER = 2; +} + +// Corresponds to go-containerregistry's AuthConfig type. +// https://pkg.go.dev/github.com/google/go-containerregistry@v0.11.0/pkg/authn#AuthConfig +message ImagePullAuth { + string username = 1; + string password = 2; + string auth = 3; + string identity_token = 4; + string registry_token = 5; +} + +message ImagePullConfig { + ImagePullPolicy pull_policy = 1; + ImagePullAuth auth = 2; +} + +// Containers are run without network access (in an isolated network namespace) +// by default. +enum NetworkPolicy = { + ISOLATED = 0; + ACCESSIBLE = 1; +} + +// Only resource limits are supported. Resource requests could be added in +// future if a runner supported them (e.g. 
by running containers on Kubernetes). +message Resources { + ResourceLimits limits = 1; +} + +message ResourceLimits { + string memory = 1; + string cpu = 2; +} + +message RunFunctionConfig { + Resources resources = 1; + NetworkPolicy network = 2; + Duration timeout = 3; +} + +// The input FunctionIO is supplied as opaque bytes. +message RunFunctionRequest { + string image = 1; + bytes input = 2; + ImagePullConfig = 3; + RunFunctionConfig = 4; +} + +// The output FunctionIO is supplied as opaque bytes. Errors encountered while +// running a function (as opposed to errors returned _by_ a function) will be +// encapsulated as gRPC errors. +message RunFunctionResponse { + bytes output = 1; +} +``` + +### The Default Function Runner + +This document proposes that Crossplane include a default function runner. This +runner would be implemented as a sidecar to the core Crossplane container that +runs functions inside itself. + +The primary advantages of this approach are speed and control. There's no need +to wait for another system (for example the Kubernetes control plane) to +schedule each container, and the runner can easily pass stdout from one +container to another's stdin. Speed of function runs is of particular importance +given that each XR typically reconciles (i.e. invokes its function pipeline) +once every 60 seconds. + +The disadvantages of running the pipeline inside a sidecar container are scale +and reinvention of the wheel. The resources available to the sidecar container +will bound how many functions it can run at any one time, and it will need to +handle features that the Kubelet already offers such as pull secrets, caching +etc. + +[Rootless containers][rootless] appear to be the most promising way to run +functions as containers inside a container: + +> Rootless containers uses `user_namespaces(7)` (UserNS) for emulating fake +> privileges that are enough to create containers. The pseudo-root user gains +> capabilities such as `CAP_SYS_ADMIN` and `CAP_NET_ADMIN` inside UserNS to +> perform fake-privileged operations such as creating mount namespaces, network +> namespaces, and creating TAP devices. + +Using user namespaces allows the runner to use the other kinds of namespaces +listed above to ensure an extra layer of isolation for the functions it runs. +For example a network namespace could be configured to prevent a function having +network access. + +User namespaces are well supported by modern Linux Kernels, having been +introduced in Linux 3.8. Many OCI runtimes (including `runc`, `crun`, and +`runsc`) support rootless mode. `crun` appears to be the most promising choice +because: + +* It is more self-contained than `runc` (the reference and most commonly used + OCI runtime), which relies on setuid binaries to setup user namespaces. +* `runsc` (aka gVisor) uses extra defense in depth features which are not + allowed inside most containers due to their seccomp policies. + +Of course, "a container" is in fact many technologies working together and some +parts of rootless containers are less well supported than others; for example +cgroups v2 is required in order to limit resources like CPU and memory available +to a particular function. cgroups v2 has been available in Linux since 4.15, but +was not enabled by many distributions until 2021. In practice this means +Crossplane users must use a [sufficiently modern][cgroups-v2-distros] +distribution on their Kubernetes nodes in order to constrain the resources of a +Composition function. 
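+
+For illustration only - this is not part of the proposed implementation - a
+runner could detect whether the unified cgroup v2 hierarchy is available on a
+node before attempting to constrain a function's resources. A minimal sketch,
+assuming `golang.org/x/sys/unix`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"golang.org/x/sys/unix"
+)
+
+// cgroup2Available reports whether /sys/fs/cgroup is the unified cgroup v2
+// hierarchy, which a rootless runner needs in order to apply CPU and memory
+// limits to a function container.
+func cgroup2Available() (bool, error) {
+	var st unix.Statfs_t
+	if err := unix.Statfs("/sys/fs/cgroup", &st); err != nil {
+		return false, err
+	}
+	return st.Type == unix.CGROUP2_SUPER_MAGIC, nil
+}
+
+func main() {
+	ok, err := cgroup2Available()
+	if err != nil {
+		fmt.Println("could not stat /sys/fs/cgroup:", err)
+		return
+	}
+	fmt.Println("cgroup v2 available:", ok)
+}
+```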
+
+Similarly, [overlayfs] was not allowed inside user namespaces until Linux 5.11.
+Overlayfs is typically used to create a root filesystem for a container that is
+backed by a read-write 'upper' directory overlaid on a read-only 'lower'
+directory. This allows the root OCI image filesystem to persist as a cache of
+sorts, while changes made during the lifetime of a container can be easily
+discarded. It's possible to replicate these benefits (at the expense of disk
+usage and start-up time) by falling back to making a throwaway copy of the root
+filesystem for each container run where overlayfs is not available.
+
+Under the approach proposed by this document each function run would involve the
+following steps:
+
+1. Use [go-containerregistry] to pull the function's OCI image.
+1. Extract (untar) the OCI image's flattened filesystem to disk.
+1. Create a filesystem for the container - either an overlay or a copy of the
+   filesystem extracted in step 2.
+1. Derive an [OCI runtime configuration][oci-rt-cfg] from the
+   [OCI image configuration][oci-img-cfg] supplied by go-containerregistry.
+1. Execute `crun run` to invoke the function in a rootless container.
+
+Executing `crun` directly as opposed to using a higher level tool like `docker`
+or `podman` allows the default function runner to avoid new dependencies apart
+from a single static binary (i.e. `crun`). It keeps most functionality (pulling
+images etc) inside the runner's codebase, delegating only container creation to
+an external tool. Composition Functions are always short-lived and should always
+have their stdin and stdout attached to the runner, so wrappers like
+`containerd-shim` or `conmon` should not be required. The short-lived, "one
+shot" nature of Composition Functions means it should also be acceptable to
+`crun run` the container rather than using `crun create`, `crun start`, etc.
+
+At the time of writing rootless containers appear to be supported by Kubernetes,
+including Amazon's Elastic Kubernetes Service (EKS) and Google Kubernetes Engine
+(GKE).
+
+Testing using GKE 1.21.10-gke.2000 with Container Optimized OS (with containerd)
+cos-89-16108-604-19 nodes (Kernel COS-5.4.170) found that it was possible to run
+`unshare -rUm` (i.e. to create a new user and mount namespace) inside an Alpine
+Linux container as long as AppArmor was disabled by applying the annotation
+`container.apparmor.security.beta.kubernetes.io/${CONTAINER_NAME}=unconfined`.
+It's possible to create user namespaces with AppArmor enabled, but not to create
+mount namespaces with different mount propagation from their parent.
+
+It is not possible to use rootless containers with gVisor enabled, as gVisor
+does not yet [support mount namespaces][gvisor-mountns]. This means that it is
+not possible to use rootless containers with GKE Autopilot, which requires that
+gVisor be used.
+
+Testing using EKS v1.21.5-eks-9017834 with Amazon Linux 2 nodes (Kernel
+5.4.188-104.359.amzn2.x86_64) found that it was possible to run `unshare -rUm`
+inside an Alpine Linux container 'out of the box'.
+
+The `unshare` syscall used to create containers is rejected by the default
+Docker and containerd seccomp profiles. seccomp is disabled ("Unconfined") by
+default in Kubernetes, but that will soon change per [this KEP][kep-seccomp]
+which proposes that Kubernetes use the seccomp profiles of its container engine
+(i.e. containerd) by default.
Once this happens Crossplane will either need to
+run with the "Unconfined" seccomp profile, or a variant of the default
+containerd seccomp profile that allows a few extra syscalls (i.e. at least
+`unshare` and `mount`). This can be done by setting a Pod's
+`spec.securityContext.seccompProfile.type` field to `Unconfined`.
+
+### Packaging Containerized Functions
+
+This document proposes that containerized functions support Crossplane [package
+metadata][package-meta] in the form of a `package.yaml` file at the root of the
+flattened filesystem and/or the OCI layer annotated as `io.crossplane.xpkg:
+base` per the [xpkg spec][xpkg-spec]. This `package.yaml` file would contain a
+custom-resource-like YAML document of type `Function.meta.pkg.crossplane.io`.
+
+Unlike `Configuration` and `Provider` packages, `Function` packages would not
+actually be processed by the Crossplane package manager but rather by the
+Composition (`apiextensions`) machinery. In practice Crossplane would be
+ignorant of the `package.yaml` file; it would exist purely as a way to attach
+"package-like" metadata to containerized Crossplane functions. Therefore, unlike
+the existing package types the `package.yaml` would contain no `spec` section.
+
+An example `package.yaml` might look like:
+
+```yaml
+# Required. Must be as below.
+apiVersion: meta.pkg.crossplane.io/v1alpha1
+# Required. Must be as below.
+kind: Function
+# Required.
+metadata:
+  # Required. Must comply with Kubernetes API conventions.
+  name: function-example
+  # Optional. Must comply with Kubernetes API conventions.
+  annotations:
+    meta.crossplane.io/source: https://github.com/negz/example-fn
+    meta.crossplane.io/description: An example function
+```
+
+## Alternatives Considered
+
+Most of the alternatives considered in this design could also be thought of as
+future considerations. In most cases these alternatives don't make sense at the
+present time but likely will in the future.
+
+### Using Webhooks to Run Functions
+
+Crossplane could invoke functions by calling a webhook rather than running an
+OCI container. In this model function input and output would still take the form
+of a `FunctionIO`, but would be HTTP request and response bodies rather than a
+container's stdin and stdout.
+
+The primary drawback of this approach is the burden it puts on function authors
+and Crossplane operators. Rather than simply publishing an OCI image the author
+and/or Crossplane operator must deploy and operate a web server, ensuring secure
+communication between Crossplane and the webhook endpoint.
+
+Support for `type: Webhook` functions will likely be added shortly after initial
+support for `type: Container` functions is released.
+
+### Using chroots to Run Functions
+
+Crossplane could invoke functions packaged as OCI images by unarchiving them and
+then running them inside a simple `chroot`. This offers more compatibility than
+rootless containers at the expense of isolation - it's not possible to constrain
+a chrooted function's compute resources, network access, etc. `type: Chroot`
+functions would use the same artifacts as `type: Container` functions but invoke
+them differently.
+
+Support for `type: Chroot` functions could be added shortly after initial
+support for `type: Container` functions is released if `type: Container` proves
+to be insufficiently compatible (e.g. for clusters running gVisor, or that
+require seccomp be enabled).
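+
+For illustration only - not something this document proposes to implement - the
+core of such a `type: Chroot` runner could be as small as the following sketch.
+It assumes the function's OCI image filesystem has already been pulled and
+unarchived (as described above for the default runner), that the supplied
+entrypoint is an absolute path inside that filesystem, and that the process is
+privileged enough to `chroot`:
+
+```go
+package chrootrunner
+
+import (
+	"io"
+	"os/exec"
+	"syscall"
+)
+
+// RunChrooted invokes a function's entrypoint inside a chroot rooted at the
+// unarchived image filesystem. The FunctionIO is passed on stdin and read back
+// from stdout, exactly as for a `type: Container` function. Note that no
+// namespaces or cgroups are involved, which is why a chrooted function's
+// compute resources and network access cannot be constrained.
+func RunChrooted(rootfs, entrypoint string, functionIO io.Reader) ([]byte, error) {
+	cmd := exec.Command(entrypoint)
+	cmd.Stdin = functionIO
+	// Change the root filesystem (and working directory) before the function's
+	// entrypoint is exec'd in the child process.
+	cmd.SysProcAttr = &syscall.SysProcAttr{Chroot: rootfs}
+	cmd.Dir = "/"
+	return cmd.Output()
+}
+```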
+ +### Using Kubernetes to Run Containerized Functions + +Asking Kubernetes to run a container pipeline is less straightforward than you +might think. Crossplane could schedule a `Pod` for each XR reconcile, or create +a `CronJob` to do so regularly. Another option could be to connect directly to a +Kubelet. This approach would enjoy all the advantages of the existing Kubelet +machinery (pull secrets, caching, etc) but incurs overhead in other areas, for +example: + +* Every reconcile requires a pod to be scheduled, which may potentially block on + node scale-up, etc. +* stdin and stdout must be streamed via the API server, for example by using the + [`/attach` subresource][attach]. +* Running containers in order requires either (ab)using init containers or + injecting a middleware binary that blocks container starts to ensure they run + in order (similar to Argo Workflow's '[emissary]' executor): + +> The emissary works by replacing the container's command with its own command. +> This allows that command to capture stdout, the exit code, and easily +> terminate your process. The emissary can also delay the start of your process. + +You can see some of the many options Argo Workflows explored to address these +issues before landing on `emissary` in their list of +[deprecated executors][argo-deprecated-executors]. + +### Using KRM Function Spec Compliant Functions + +While the design proposed by this document is heavily inspired by KRM Functions, +the [KRM function specification][krm-fn-spec] as it currently exists is not an +ideal fit. This is because: + +1. It is built around the needs of CLI tooling - including several references to + (client-side) 'files' that don't exist in the Crossplane context. +1. Crossplane needs additional metadata to distinguish which resource in the + `ResourceList` is the composite resource and which are the composed + resources. + +### gVisor + +[gVisor][gvisor] supports rootless mode, but requires too many privileges to run +in a container. A proof-of-concept [exists][gvisor-unpriv] to add an +`--unprivileged` flag to gVisor, allowing it to run inside a container. It's +unlikely that gVisor will work in all situations in the near future - for +example gVisor cannot currently run inside gVisor and support for anything other +than x86 architectures is experimental. 
+ +[term-composition]: https://crossplane.io/docs/v1.9/concepts/terminology.html#composition +[v0.10.0]: https://github.com/crossplane/crossplane/releases/tag/v0.10.0 +[composition-design]: https://github.com/crossplane/crossplane/blob/e02c7a3/design/design-doc-composition.md#goals +[declarative-app-management]: https://docs.google.com/document/d/1cLPGweVEYrVqQvBLJg6sxV-TrE5Rm2MNOBA_cxZP2WU/edit +[bcl]: https://twitter.com/bgrant0607/status/1123620689930358786?lang=en +[terraform-count]: https://www.terraform.io/language/meta-arguments/count +[turing-complete]: https://en.wikipedia.org/wiki/Turing_completeness#Unintentional_Turing_completeness +[pitfalls-dsl]: https://github.com/kubernetes/community/blob/8956bcd54dc6f99bcb681c79a7e5399289e15630/contributors/design-proposals/architecture/declarative-application-management.md#pitfalls-of-configuration-domain-specific-languages-dsls +[controller-runtime]: https://github.com/kubernetes-sigs/controller-runtime +[krm-fn-spec]: https://github.com/kubernetes-sigs/kustomize/blob/9d5491/cmd/config/docs/api-conventions/functions-spec.md +[rawextension]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#rawextension +[unix-domain-sockets]: https://man7.org/linux/man-pages/man7/unix.7.html +[rootless]: https://rootlesscontaine.rs/how-it-works/userns/ +[cgroups-v2-distros]: https://rootlesscontaine.rs/getting-started/common/cgroup2/#checking-whether-cgroup-v2-is-already-enabled +[overlayfs]: https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html +[go-containerregistry]: https://github.com/google/go-containerregistry +[oci-rt-cfg]: https://github.com/opencontainers/runtime-spec/blob/v1.0.2/config.md +[oci-img-cfg]: https://github.com/opencontainers/image-spec/blob/v1.0.2/config.md +[gvisor-mountns]: https://github.com/google/gvisor/issues/221 +[kep-seccomp]: https://github.com/kubernetes/enhancements/issues/2413 +[package-meta]: https://github.com/crossplane/crossplane/blob/035e77b/design/one-pager-package-format-v2.md +[xpkg-spec]: https://github.com/crossplane/crossplane/blob/035e77b/docs/reference/xpkg.md +[attach]: https://github.com/kubernetes/kubectl/blob/18a531/pkg/cmd/attach/attach.go +[emissary]: https://github.com/argoproj/argo-workflows/blob/702b293/workflow/executor/emissary/emissary.go#L25 +[argo-deprecated-executors]: https://github.com/argoproj/argo-workflows/blob/v3.4.1/docs/workflow-executors.md +[krm-fn-spec]: https://github.com/kubernetes-sigs/kustomize/blob/9d5491/cmd/config/docs/api-conventions/functions-spec.md +[krm-fn-runtimes]: https://github.com/GoogleContainerTools/kpt/issues/2567 +[krm-fn-catalog]: https://catalog.kpt.dev +[gvisor]: https://gvisor.dev +[gvisor-unpriv]: https://github.com/google/gvisor/issues/4371#issuecomment-700917549 \ No newline at end of file diff --git a/design/design-doc-composition-functions.md b/design/design-doc-composition-functions.md index 107ccef51..e6a84e6ce 100644 --- a/design/design-doc-composition-functions.md +++ b/design/design-doc-composition-functions.md @@ -1,8 +1,8 @@ - # Composition Functions + # Composition Functions: Beta -* Owners: Nic Cope (@negz), Sergen Yalçın (@sergenyalcin) -* Reviewers: Crossplane Maintainers -* Status: Draft +* Owners: Nic Cope (@negz) +* Reviewers: Hasan Turken (@turkenh), Jared Watts (@jbw976) +* Status: Accepted ## Background @@ -99,74 +99,99 @@ experiences designing Kubernetes, its precursor Borg, and many generations of configuration languages at Google, including [BCL/GCL][bcl]. 
In particular, we wanted to: -* Avoid organically growing a new DSL. These languages tend to devolve to - incoherency as stakeholders push to “bolt on” new functionality to solve - pressing problems at the expense of measured design. Terraform’s DSL - unintuitively [supporting the count argument in some places but not +* Avoid organically growing a new configuration Domain Specific Language (DSL). + These languages tend to devolve to incoherency as stakeholders push to “bolt + on” new functionality to solve pressing problems at the expense of measured + design. Terraform’s DSL [supporting the count argument in some places but not others][terraform-count] is a great example of this. Inventing a new DSL also comes with the cost of inventing new tooling to test, lint, generate, etc, your DSL. * Stick to configuration that could be modeled as a REST API. Modeling - Composition logic as a schemafied API resource makes it possible for - Crossplane to validate that logic and provide feedback to the platform team at - configuration time. It also greatly increases interoperability thanks to broad - support across tools and languages for interacting with REST APIs. - -It was also important to avoid the “worst of both worlds” - i.e. growing a fully -featured ([Turing-complete][turing-complete]) DSL modeled as a REST API. To this -end we omitted common language features such as conditionals and iteration. Our -rationale being that these features were better deferred to a General Purpose -Programming Language (GPL) designed by language experts, and with extensive -existing tooling. + Composition logic as a schemafied API resource makes it possible to validate + that logic and provide feedback to the platform team at configuration time. It + also greatly increases interoperability thanks to broad support across tools + and languages for interacting with REST APIs. + +It was also important to avoid the “worst of both worlds” - i.e. growing a new, +fully featured DSL modeled as a REST API. To this end we omitted common language +features such as conditionals and iteration. Our rationale being that these +features were better deferred to a General Purpose Programming Language (GPL) or +a mature, existing configuration DSL, with an established ecosystem of tooling +and documentation. Since its inception the Crossplane maintainers’ vision has been that there should essentially be two variants of Composition: * For simple cases, use contemporary "Patch and Transform" (P&T) Composition. -* For advanced cases, bring your tool or programming language of choice. - -In this context a “simple case” might involve composing fewer than ten resources -without the need for logic such as conditionals and iteration. Note that the -Composition logic, whether P&T or deferred to a tool or programming language, is -always behind the API line (behind an XR). This means the distinction is only -important to the people authoring the Compositions, never to the people -consuming them. - -Offering two variants of Composition allows Crossplane users to pick the one -that is best aligned with their situation, preferences, and experience level. -For simple cases you don’t need to learn a new programming language or tool, and -there are no external dependencies - just write familiar, Kubernetes-style YAML. -For advanced cases leverage proven tools and languages with existing ecosystems -and documentation. Either way, Crossplane has no religion - if you prefer not to -“write YAML”, pick another tool and vice versa. 
+* For advanced cases, use your DSL or GPL of choice. + +Note that the Composition logic (whether P&T, DSL, or GPL) is always "behind the +API line" (behind an XR). This means the distinction is only important to the +people _authoring_ the Compositions, never to the people consuming them. + +In the time since P&T Composition became available we've seen that: + +* Folks want to use Composition for more complex cases than we anticipated. Many + XRs fan out into tens of composed resources, sometimes nested. +* Many Compositions call for a high level of expressiveness - conditionals, + iteration, merging data from multiple fields, etc. +* The lack of a more expressive alternative to P&T Composition _has_ set us down + the path of organically growing a new DSL. (e.g. [#1972], [#2352], [#4051], + [#3917], [#3919], [#3989], [#3498], [#3458], [#3316], [#4036], [#4065], + [#4026]) +* Organically growing a new DSL is not only undesirable, but _slow_. Because + each addition to P&T Composition changes Crossplane's core API we must be + careful about what we accept. Changes take a long time to reach consensus. + They're coupled to Crossplane's release cycle, which means changes can take a + long time to become available, and aren't backported to older versions. +* How folks would ideally configure Composition varies. Some folks prefer a + particular configuration DSL, others look for webhooks written in a GPL, etc. + +To address these issues, we added support for 'Composition Functions' in +Crossplane v1.11. Functions are an alpha feature and are off by default. In the +[alpha design][alpha-design] we proposed that how a Function was implemented +could vary in at least two ways: + +* By `type` - i.e. `type: Container`, `type: Webhook`. +* By "runner" implementation - i.e. how a `type: Container` Function is run. + +We implemented a "default" or "reference" `type: Container` Function runner with +known scalability and compatibility constraints, but didn't lay out a clear path +for others to build alternative Function runners. Since alpha, no alternative +Function runners have appeared. + +The aforementioned limitations of the reference Function runner lead to a demand +to support webhook Functions as an alternative. Ultimately we were reluctant to +support this because it fragments the Function ecosystem. When a Function can be +an OCI container or a webhook (or more in future?) there is no one artifact that +constitutes "a Function". This makes discovery and distribution of Functions +more complicated than we would like. + +This document is an iteration on (and supersedes) the [previous design +document][alpha-design] based on what we've learned since the feature was +launched. In particular, it is motivated by the desire to identify a single kind +of Function artifact suitable for most people - similar to Providers. ## Goals The proposal put forward by this document should: -* Let folks use their composition tool and/or programming language of choice. * Support 'advanced' composition logic such as loops and conditionals. -* Balance safety (e.g. sandboxing) with speed and simplicity. -* Be possible to introduce behind a feature flag that is off by default. - -While not an explicit goal, it would also be ideal if the solution put forth by -this document could serve as a test bed for new features in the contemporary -'resources array' based form of Composition. 
- -The user experience around authoring and maintaining Composition Functions is -out of scope for this proposal, which focuses only on adding foundational -support for the feature to Crossplane. +* Let folks specify composition logic in their DSL or GPL of choice. +* Make it easy to extend Crossplane with new ways to 'do composition'. +* Decouple adding new ways to 'do composition' from the core release cycle. +* Make it easy to discover and share new ways to 'do composition'. +* Be possible to keep behind a feature flag until it is generally available. ## Proposal -### Overview - -This document proposes that a new `functions` array be added to the existing -`Composition` type. This array of functions would be called either instead of or -in addition to the existing `resources` array in order to determine how an XR -should be composed. The array of functions acts as a pipeline; the output of -each function is passed as the input to the next. The output of the final -function tells Crossplane what must be done to reconcile the XR. +This document proposes that a new `pipeline` array of Function calls be added to +the existing `Composition` type. This array of Functions would be called either +instead of or in addition to the existing `resources` array in order to +determine how an XR should be composed. The array of Functions acts as a +pipeline; the output of each Function is passed as the input to the next. The +output of the final Function call tells Crossplane what must be done to +reconcile the XR. ```yaml apiVersion: apiextensions.crossplane.io/v2alpha1 @@ -175,32 +200,609 @@ metadata: name: example spec: compositeTypeRef: - apiVersion: database.example.org/v1alpha1 + apiVersion: database.example.org/v1 kind: XPostgreSQLInstance - functions: - - name: my-cool-function - type: Container - container: - image: xkpg.io/my-cool-function:0.1.0 + # This Composition uses a pipeline of Functions instead of (P&T) resources. + pipeline: + # Each step in the pipeline calls one Composition Function. + - step: compose-xr-using-go-templates + # The functionRef tells the Composition which Function to call. Crossplane + # passes the desired and observed state of the XR and any existing composed + # resources as 'arguments' to the Function call. + functionRef: + name: go-templates + # A Function call may optionally accept input. Think of this like an + # additional, optional argument to the Function call. The input is a nested + # KRM resource - i.e. it has an apiVersion and kind. + input: + apiVersion: example.org/v1 + kind: GoTemplate + source: Remote + remote: git://github.com/example/my-xpostgresql-go-templates + # A pipeline can have multiple steps. Each step is processed in order. This + # validation step is passed the desired state accumulated by the prior step. + - step: validate-composed-resources + functionRef: + name: cel-validation ``` -Under this proposal each function is the entrypoint of an OCI image, though the -API is designed to support different function implementations (such as webhooks) -in the future. The updated API would affect only the `Composition` type - no -changes would be required to the schema of `CompositeResourceDefinitions`, XRs, -etc. +The updated API would affect only the `Composition` type - no changes would be +required to the schema of `CompositeResourceDefinitions`, XRs, etc. -Notably the functions would not be responsible for interacting with the API -server to create, update, or delete composed resources. 
Instead, they instruct -Crossplane which resources should be created, updated, or deleted. - -Under the proposed design functions could also be used for purposes besides +Notably the Functions would not need to be responsible for interacting with the +API server to create, update, or delete composed resources. Instead, they +instruct Crossplane which resources should be created, updated, or deleted. +Under the proposed design Functions could also be used for purposes besides rendering composed resources, for example validating the results of the -`resources` array or earlier functions in the `functions` array. Furthermore, a -function could also be used to implement 'side effects' such as triggering a -replication or backup. +`resources` array or earlier Functions in the `pipeline`. -Below is a more detailed example of an entry in the `functions` array. +Before you can use a Function, you must install it. Installing a Function works +just like installing a Provider: + +```yaml +apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: go-templates +spec: + package: xpkg.upbound.io/negz/go-templates:v0.1.0 +``` + +The Function package's `metadata.name` corresponds to the `functionRef` in the +previous example Composition. A Composition specifies which Function to run by +referencing the package's name. + +### Calling a Function + +Despite the name, a 'Function' is actually more like a 'function server'. Under +this proposal, Functions are long-running processes. When you install one, the +package manager deploys it using a Kubernetes Deployment - the same way it would +deploy a Provider. + +![Crossplane calling three Functions via gRPC](assets/design-doc-composition-functions/functions.png) + +Crossplane makes a gRPC `RunFunctionRequest` to the Function it wishes to +invoke. The Function should respond with a `RunFunctionResponse`. These RPCs are +defined as follows: + +```protobuf +syntax = "proto3"; + +import "google/protobuf/struct.proto"; +import "google/protobuf/duration.proto"; + +// A FunctionRunnerService is a Composition Function. +service FunctionRunnerService { + // RunFunction runs the Composition Function. + rpc RunFunction(RunFunctionRequest) returns (RunFunctionResponse) {} +} + +// A RunFunctionRequest requests that the Composition Function be run. +message RunFunctionRequest { + // Metadata pertaining to this request. + RequestMeta meta = 1; + + // The observed state prior to invocation of a Function pipeline. State passed + // to each Function is fresh as of the time the pipeline was invoked, not as + // of the time each Function was invoked. + State observed = 2; + + // Desired state according to a Function pipeline. The state passed to a + // particular Function may have been accumulated by processing a Composition's + // patch-and-transform resources array. It may also have been accumulated by + // previous Functions in the pipeline. + State desired = 3; + + // Optional input specific to this Function invocation. A JSON representation + // of the 'input' block of the relevant entry in a Composition's pipeline. + optional google.protobuf.Struct input = 4; +} + +// A RunFunctionResponse contains the result of a Composition Function run. +message RunFunctionResponse { + // Metadata pertaining to this response. + ResponseMeta meta = 1; + + // Desired state according to a Function pipeline. Functions may add desired + // state, and may mutate or delete any part of the desired state they are + // concerned with. 
A Function must pass through any part of the desired state
+  // that it is not concerned with.
+  State desired = 2;
+
+  // Results of the Function run. Results are used for observability purposes.
+  repeated Result results = 3;
+}
+
+// RequestMeta contains metadata pertaining to a RunFunctionRequest.
+message RequestMeta {
+  // An opaque string identifying the content of the request. Two identical
+  // requests should have the same tag.
+  string tag = 1;
+}
+
+// ResponseMeta contains metadata pertaining to a RunFunctionResponse.
+message ResponseMeta {
+  // An opaque string identifying the content of the request. Must match the
+  // meta.tag of the corresponding RunFunctionRequest.
+  string tag = 1;
+
+  // Time-to-live of this response. Deterministic Functions with no side-effects
+  // (e.g. simple templating Functions) may specify a TTL. Crossplane may choose
+  // to cache responses until the TTL expires.
+  optional google.protobuf.Duration ttl = 2;
+}
+
+// State of the composite resource (XR) and any composed resources.
+message State {
+  // The state of the composite resource (XR).
+  Resource composite = 1;
+
+  // The state of any composed resources.
+  map<string, Resource> resources = 2;
+}
+
+// A Resource represents the state of a resource.
+message Resource {
+  // The JSON representation of the resource.
+  google.protobuf.Struct resource = 1;
+
+  // The resource's connection details.
+  map<string, bytes> connection_details = 2;
+}
+
+// A Result of running a Function.
+message Result {
+  // Severity of this result.
+  Severity severity = 1;
+
+  // Human-readable details about the result.
+  string message = 2;
+}
+
+// Severity of Function results.
+enum Severity {
+  SEVERITY_UNSPECIFIED = 0;
+
+  // Fatal results are fatal; subsequent Composition Functions may run, but
+  // the Composition Function pipeline run will be considered a failure and
+  // the first fatal result will be returned as an error.
+  SEVERITY_FATAL = 1;
+
+  // Warning results are non-fatal; the entire Composition will run to
+  // completion but warning events and debug logs associated with the
+  // composite resource will be emitted.
+  SEVERITY_WARNING = 2;
+
+  // Normal results are emitted as normal events and debug logs associated
+  // with the composite resource.
+  SEVERITY_NORMAL = 3;
+}
+```
+
+This RPC is essentially the `RunFunctionRequest` from the [alpha Functions
+design][alpha-design] with the [`FunctionIO`][functionio-schema] elevated from
+opaque YAML-encoded bytes to 'native' RPC code. Kubernetes resources are
+represented using the [`google.protobuf.Struct` well-known
+type][google-protobuf-struct], which can encode arbitrary JSON.
+
+Some key differences between the alpha `FunctionIO` and the proposed beta
+`RunFunctionRequest`:
+
+* `observed.resources` and `desired.resources` are a map keyed by resource name,
+  not an array of objects with name fields. The previous pattern was a result of
+  `FunctionIO` attempting to be an idiomatic KRM object, which is not a
+  constraint for `RunFunctionRequest`. This should make it easier to look up
+  resources by name when developing Functions.
+* Entries in `desired.resources` can no longer return 'derived' connection
+  secrets or readiness checks the way an entry in the P&T resources array can.
+  Instead a Function is intended to set XR connection details and/or readiness
+  by mutating them directly.
+
+The package manager is responsible for creating a headless Kubernetes Service
+where each Function's Deployment can be reached.
The address of the Service will +be exposed as the `status.endpoint` of the Function resource. The Service must +be headless in order for Crossplane's gRPC client to load-balance connections +when there are multiple Function replicas. + +Note that the fact this endpoint is powered by a Service is an implementation +detail; it may be possible for Functions to be reached (and indeed deployed) by +other means in future (see [Runtime Configuration](#runtime-configuration)). + +```yaml +apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: go-templates +spec: + package: xpkg.upbound.io/negz/go-templates:v0.1.0 +status: + # The gRPC endpoint where Crossplane will send RunFunctionRequests. + endpoint: https://go-templates-9sdfn2 +``` + +gRPC communication between Crossplane and a Function will be secured by mutual +transport layer security (mTLS). Crossplane has an established pattern for +setting up mTLS certificate bundles - we do this today to secure communication +between the API server and our validation webhooks, and between Crossplane and +External Secret Stores. We might want to [rethink this][cert-per-entity] in +future, but to begin with we'll stick with the established pattern, i.e.: + +* The Helm chart creates an empty Secret for the client bundle. +* The core init container bootstraps the client bundle. +* The package manager creates a Secret for each Function's server bundle. +* The package manager bootstraps each Function's server bundle. + +See [#3884] for an example of this pattern. + +The frequency with which Crossplane will call each Function is relative to how +frequently each XR is reconciled. There are two main triggers to reconcile an XR: + +1. A watch-triggered reconcile, i.e. because the XR changed. +2. A poll-triggered reconcile, by default once per 60 seconds. + +XRs rely on poll-triggered reconciliation to promptly correct drift of their +composed resources. The poll interval can be set using the `--poll-interval` +flag. The XR controller does not know what kinds of resources it will compose +when started, so it cannot start a watch for them. + +Note that there is no watch on the Composition type. XRs consume a Composition +via a CompositionRevision. Each time the Composition changes a new revision is +created. The XR is pinned to a specific revision of a Composition until it is +updated to consume a new one. This update would trigger a reconcile due to the +watch on the XR. + +Assume: + +* There are 100 XRs, all of the same type, and all using the same Composition. +* The XRs have all reached a steady state, polling at the default 60s frequency. +* The Composition includes 1 Function call. + +In this case the Function would be called __100 * 1 = 100 times per 60s__. + +Most gRPC server implementations can serve multiple requests concurrently. For +example in Go [each RPC is invoked in its own goroutine][go-grpc-concurrency]. +The gRPC project maintains a [suite of benchmarks][grpc-benchmarks]. At the time +of writing these benchmarks show that an 8-core Python server can serve ~3,500 +trivial requests per second, while an 8-core Go server can serve ~180,000 per +second. This should be considered a ceiling rather than indicative of real-world +Function performance. In practice the throughput of a particular Function will +be determined by the language it's written in, and what it does. + +Inevitably some Functions will be called frequently enough to exhaust the +processing capacity of a single pod. 
A few potential approaches are possible to +scale Functions include: + +* Tune the `--poll-interval`, e.g. to every 5m or 10m, to reduce calls. +* Scale heavily used Functions horizontally, by increasing Deployment replicas. +* Cache the responses of deterministic Function calls (see [Caching](#caching)). +* Eliminate poll-triggered XR reconciliation to reduce calls (see [Dynamic + Composed Resource Watches](#dynamic-composed-resource-watches)). + +### Developing a Function + +I believe it's critical that Composition Functions are easy to develop - much +easier than developing a Crossplane Provider. + +Not everyone will want or need to develop Functions. Some Functions will be +_generic_. A generic Function would be able to work with any kind of XR. +Consider for example a `go-templates` Function that effectively "outsourced" the +entire Composition logic of an XR to a set of Go templates - a familiar +authoring experience to anyone who has used Helm. + +Despite the potential for generic Functions, I feel it must be easy to "roll +your own" because: + +* We want to make it really easy to make new generic Functions! The easier + Functions are to build, the more community members can contribute to the + ecosystem. +* Sometimes there won't be a perfect Function for your unique use-case. The + easier Functions are to build, the more likely you are to be able to help + yourself. +* Some folks just _prefer_ to configure how to take an XR and produce a set of + composed resources using the expressiveness and familiarity of a + general-purpose programming language. The more complex your Composition logic + becomes, the more appealing this approach is. + +I propose we do three things to make Functions easy to develop: + +1. Provide SDKs for popular languages - e.g. Go, Python, TypeScript. +2. Provide tooling to scaffold, build, push, and test Functions. +3. Support providing Function logic as an OCI container that runs to completion. + +Consider this section a medium-resolution sketch of the Function development +experience. Likely each of these components (tooling, SDKs, etc) warrant at +least a one-pager of their own, and they may not look exactly as proposed here. +This section intends only to paint a picture of what Functions enable. + +#### Use Language SDKs + +Consider for example the following experience for writing a Go Function. Keep in +mind a similar experience would also exist for other languages. + +```shell +# Initialize a new Function that uses the Go SDK. +$ kubectl crossplane init function --template="github.com/crossplane/fn-template-go" + +# This generates boilerplate that uses the Go SDK. +$ ls +crossplane.yaml fn.go fn_test.go go.mod main.go + +# At a minimum you need to add your Function logic to fn.go +vim fn.go +``` + +Note that Functions have package metadata (in `crossplane.yaml` by convention) +just like a Provider or Configuration. A Function may declare dependencies - for +example a Function could depend on a Provider to indicate that it composes that +Provider's MRs. + +After you initialize a Function using the Go SDK you must update fn.go to +provide your composition logic. When you first open fn.go you’ll see that +`kubectl crossplane init function` added boilerplate like this: + +```go +package main + +import ( + "context" + + function "github.com/crossplane/fn-sdk-go" + "github.com/crossplane/fn-sdk-go/pb" + "github.com/crossplane/fn-sdk-go/errors" +) + +// Function defines the logic of your Function. 
+func Function(ctx context.Context, req *pb.RunFunctionRequest) (*pb.RunFunctionResponse, error) {
+	// Get the desired XR from the request.
+	xr := function.NewEmptyCompositeResource()
+	if err := function.GetCompositeResource(req, xr); err != nil {
+		return nil, errors.Wrap(err, "could not get composite resource")
+	}
+
+	// TODO: Replace this with your function logic. :)
+	rsp := function.NewResponseTo(req)
+	function.Normalf(rsp, "successfully read desired composite resource %q", xr.GetName())
+
+	return rsp, nil
+}
+```
+
+To write a Function, replace the boilerplate with your own logic. Here’s an
+example that uses a mockup of a Go SDK to achieve the same goal as lab 4 from
+[the Kubecon EU ContribFest][kubecon-eu-contribfest].
+
+```yaml
+apiVersion: contribfest.crossplane.io/v1alpha1
+kind: XRobotGroup
+metadata:
+  name: somename
+spec:
+  count: 5
+```
+
+This example takes an `XRobotGroup` XR and uses its `spec.count` to compose the
+desired number of Robot resources:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+
+	"github.com/upbound/provider-dummy/apis/iam/v1alpha1"
+
+	function "github.com/crossplane/fn-sdk-go"
+	"github.com/crossplane/fn-sdk-go/pb"
+	"github.com/crossplane/fn-sdk-go/errors"
+)
+
+var colors = []string{"red", "green", "blue", "yellow", "orange"}
+
+// Function defines the logic of your Function.
+func Function(ctx context.Context, req *pb.RunFunctionRequest) (*pb.RunFunctionResponse, error) {
+	// Get the desired XR from the request.
+	xr := function.NewEmptyCompositeResource()
+	function.GetCompositeResource(req, xr)
+
+	// Get the desired resource count from the XR.
+	count, err := xr.GetInteger("spec.count")
+	if err != nil {
+		return nil, errors.Wrap(err, "could not get desired resource count")
+	}
+
+	// Create a response to return. This deep-copies any existing desired state.
+	rsp := function.NewResponseTo(req)
+
+	// Ensure the desired number of robots exist.
+	for i := 0; i < int(count); i++ {
+		name := fmt.Sprintf("robot-%d", i)
+
+		// Get the desired composed resource (if any) from the request.
+		// Since we're using Go we can import and work with API types if we want.
+		robot := &v1alpha1.Robot{}
+		function.GetComposedResource(req, name, robot)
+
+		// The robot's external name should be derived from the XR's.
+		function.SetExternalName(robot, fmt.Sprintf("%s-%s", function.GetExternalName(xr), name))
+
+		// Give this robot a random color!
+		if robot.Spec.ForProvider.Color == "" {
+			c := colors[rand.Intn(len(colors))]
+			robot.Spec.ForProvider.Color = c
+
+			// Add a result indicating that we set the color. Crossplane will
+			// surface this as a Kubernetes event and a debug log.
+			function.Normalf(rsp, "set robot %q color to %q", name, c)
+		}
+
+		// Set our new desired robot state. This will be a no-op if our robot
+		// already existed.
+		function.SetComposedResource(rsp, name, robot)
+	}
+
+	return rsp, nil
+}
+```
+
+The goals of a Composition Function SDK are to:
+
+* Eliminate boilerplate - make it feel like writing a function, not a server.
+* Expose an API that makes writing Functions intuitive and self-documenting.
+* Steer Function authors toward best practices, and away from anti-patterns.
+
+This mockup only touches on very basic functionality. A fully featured SDK would
+cover more use cases - for example reading Function-specific input and working
+with connection details.
+
+Once you’ve written your Function code you can test it using the tooling for
+your chosen language - in this case `go test`.
`kubectl crossplane` would create +boilerplate for unit tests. + +You build and push a Function just like you would a Provider or Configuration. +There are already GitHub Actions that allow this to be done as part of a CI/CD +pipeline. + +```shell +# Building the Function produces an installable Function package. +$ kubectl crossplane function build + +# Push the package to any OCI registry. +$ kubectl crossplane function push xpkg.upbound.io/negz/function-many-robots:v0.1.0 +``` + +Note that leveraging gRPC makes it possible to generate server stubs for [many +languages][grpc-supported-languages]. I would not consider these server stubs to +be 'an SDK' - they don't make writing Functions intuitive and self-documenting, +or steer Function authors toward best practices. They do however go a long way +toward eliminating boilerplate, including generating language-specific +representations of a `RunFunctionRequest` and `RunFunctionResponse`. + +#### Use an OCI Container + +In the [alpha iteration of Composition Functions][alpha-design] all Functions +are binaries that run to completion. These processes are packaged as an OCI +container. A sidecar container in the Crossplane pod - `xfn` - runs each of them +in a rootless container. + +This doesn't scale very well, but it does offer a way to build a Function for +folks who don't want to write code; you can get a long way just using a tool +like https://github.com/mikefarah/yq. It also offers a way to build Functions in +languages that don't have a 'first class' SDK available. Think of this like AWS +Lambda, which supports 9-10 first class languages, and OCI containers as a +catch-all for everything else. + +I propose we move the `xfn` Function runner out of crossplane/crossplane, +simplify it, and offer it as an alternative way to build a Function. You could +think of this as a special kind of Composition Function SDK that builds a +Composition Function from an OCI container, rather than building a Function a +literal _function_ (e.g. written in Go). + +![Crossplane calling two containerized Functions via gRPC](assets/design-doc-composition-functions/containerized-functions.png) + +Under this design each containerized Function is "wrapped" in an `xfn`-like +Composition Function. This means each Function exists as its own Deployment. + +This design merges the Protobuf `RunFunctionRequest` with the YAML `FunctionIO` +types from the alpha design, so instead of reading a `FunctionIO` from stdin a +containerized Function would read a JSON-encoded `RunFunctionRequest`, and write +a `RunFunctionResponse` to stdout. + +Building a containerized Function would work as follows. + +Start with a simple container that reads a JSON `RunFunctionRequest` on stdin +and writes a JSON `RunFunctionResponse` to stdout. This one simply adds a label +(credit to @pedjak in https://github.com/crossplane/crossplane/issues/4293) for +the idea). + +```Dockerfile +FROM alpine:3.18 +RUN apk add --no-cache jq +ENTRYPOINT ["/bin/sh", "-c", "jq '(.desired.resources[] | .resource.metadata.labels) |= {\"labelizer.xfn.crossplane.io/processed\": \"true\"} + .' | .desired"] +``` + +Then build a Function from the image. We want the image to run in its own +container, not the Function container. This allows the OCI container to run +within its own namespaces without interfering with (or being interfered with by) +the `xfn`-like 'adaptor'. To do this we bake the container into the Function +artifact: + +```shell +# Build your OCI image. +$ docker build . 
+Successfully built 202dc6e5df4c + +# Save your OCI image to a tarball. +$ docker save | gzip > function.tgz + +# Add some package metadata for your Function. +$ vim crossplane.yaml + +# Build a Function from the tarball. +$ kubectl crossplane function build --from-oci-tarball=function.tgz + +# Push the package to any OCI registry. +$ kubectl crossplane function push xpkg.upbound.io/negz/function-labelizer:v0.1.0 +``` + +The `function build --from-oci-tarball` command creates a Function powered by an +`xfn`-like adaptor that: + +1. Prepares an OCI runtime bundle from the supplied tarball at startup. +2. Listens for `RunFunctionRequest` RPCs. +3. Prepares `RunFunctionResponse` RPCs by using an OCI runtime (e.g. `crun`) to + invoke the embedded Function in a rootless container. + +Note that most of this functionality already exists in `xfn`. This makes it +cheap enough to implement this Function variant that I believe it's worth doing, +even if it ends up covering a relatively niche case. + +### Installing a Function + +Before you can use a Function, you must install it. Installing a Function works +just like installing a Provider: + +```yaml +apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: go-templates +spec: + package: xpkg.upbound.io/negz/go-templates:v0.1.0 +``` + +Like any other package, the Function type will have a corresponding +FunctionRevision type. The package manager will reconcile the active revision by +creating a Deployment - similar to how it installs a Provider. + +Functions can have and be dependencies, just like a Provider or Configuration. +For example a Configuration could depend on the Functions its Compositions use, +and a Function could depend on the Providers and Configurations whose resources +it will compose. + +### Calling a Function + +Composition Functions are very flexible, so the user experience could vary a +lot. However the general pattern is: + +1. Find (or build!) a Function you want to use. +2. Install the Function using the package manager. +3. Reference the Function in a Composition. +4. Provide Function input in the Composition, where necessary. + +Again, consider this section a medium-resolution sketch of the Function +experience. The generic Functions demonstrates here don't actually exist (yet!). +They may not look exactly as proposed here. This section intends only to paint a +picture of what Functions enable. + +Consider the hypothetical generic "Go templates" Function put forward at the +[beginning of this proposal](#proposal). Such a Function would make it possible +to write a Composition that used familiar, Helm-like moustache templates with +support for iteration and conditionals. + +If we take the [contribfest][kubecon-eu-contribfest] example again, this might +look something like this: ```yaml apiVersion: apiextensions.crossplane.io/v2alpha1 @@ -209,542 +811,327 @@ metadata: name: example spec: compositeTypeRef: - apiVersion: database.example.org/v1alpha1 + apiVersion: database.example.org/v1 kind: XPostgreSQLInstance - functions: - - name: my-cool-function - type: Container - # Configuration specific to `type: Container` functions. - container: - # The OCI image to pull and run. - image: xkpg.io/my-cool-function:0.1.0 - # Whether to pull the function Never, Always, or IfNotPresent. - imagePullPolicy: IfNotPresent - # Secrets used to pull from a private registry. - imagePullSecrets: - - namespace: crossplane-system - name: my-xpkg-io-creds - # Note that only resource limits are supported - not requests. 
- # The function will be run with the specified resource limits. - resources: - limits: - memory: 64Mi - cpu: 250m - # Defaults to 'Isolated' - i.e an isolated network namespace. - network: Accessible - # How long the function may run before it's killed. Defaults to 10s. - timeout: 30s - # Containers are run by an external process listening at the supplied - # endpoint. Specifying an endpoint is optional; the endpoint defaults to - # the below value. - runner: - endpoint: unix:///@crossplane/fn/default.sock - # An x-kubernetes-embedded-resource RawExtension (i.e. an unschemafied - # Kubernetes resource). Passed to the function as the config block of its - # FunctionIO. - config: - apiVersion: database.example.org/v1alpha1 - kind: Config - metadata: - name: cloudsql - spec: - version: POSTGRES_9_6 + pipeline: + - step: compose-xr-using-go-templates + functionRef: + name: go-templates + input: + apiVersion: example.org/v1 + kind: GoTemplate + source: Inline # Or Remote, if you want to pull templates from git. + inline: | + {{- range $i := until ( . desired.composite.resource.spec.count ) }} + --- + apiVersion: iam.dummy.upbound.io/v1alpha1 + kind: Robot + metadata: + annotations: + crossplane.io/external-name: {{ .desired.composite.resource | getExternalName }}-robot-{{ $i }} + spec: + forProvider: + {{- if $i | printf "robot-%d" | getComposedResource }} + # This resource already exists - use the same color. + color: {{ .spec.forProvider.color }} + {{ else }} + # The robot doesn't exist. Pick a new color. + color: {{ randomChoice "red" "green" "blue }} + {{- end }} + {{- end }} ``` -### Function API +This example is functionally identical to [the Go example](#use-language-sdks), +but uses a DSL - Go templates with http://masterminds.github.io/sprig/ and a +few Crossplane-specific Functions. -This document proposes that each function uses a `FunctionIO` type as its input -and output. In the case of `Container` functions this would correspond to stdin -and stdout. Crossplane would be responsible for reading stdout from the final -function and applying its changes to the relevant XR and composed resources. +Here's another example of a generic Function. This one lets Composition authors +express their Composition logic in arbitrary, inline [Starlark][starlark]. ```yaml -apiVersion: apiextensions.crossplane.io/v1alpha1 -kind: FunctionIO -config: - apiVersion: database.example.org/v1alpha1 - kind: Config - metadata: - name: cloudsql - spec: - version: POSTGRES_9_6 -observed: - composite: - resource: - apiVersion: database.example.org/v1alpha1 - kind: XPostgreSQLInstance - metadata: - name: my-db - spec: - parameters: - storageGB: 20 - compositionSelector: - matchLabels: - provider: gcp - status: - conditions: - - type: Ready - status: True - connectionDetails: - - name: uri - value: postgresql://db.example.org:5432 +apiVersion: apiextensions.crossplane.io/v2alpha1 +kind: Composition +metadata: + name: example +spec: + compositeTypeRef: + apiVersion: database.example.org/v1 + kind: XPostgreSQLInstance + pipeline: + - step: compose-xr-using-go-templates + functionRef: + name: starlark + input: + apiVersion: example.org/v1 + kind: StarlarkScript + script: | + # main is passed a RunFunctionRequest as a (Python-like) dictionary. + # It must return a RunFunctionResponse-shaped dictionary. 
+ def main(req): + rsp = {desired=req["desired"]} + xr = req["observed"]["composite"]["resource"] + + for i in range(int(xr["spec"]["count"])): + name = "robot-{}".format(i) + + # We already created this one during a previous reconcile. + if name in req["observed"]["resources"]: + rsp["desired"]["resources"][name] = req["observed"]["resources"][name] + continue + + # This robot should exist, but doesn't. + rsp["desired"]["resources"][name] = { + "resource": { + "apiVersion": "iam.dummy.upbound.io/v1alpha1", + "kind": "Robot", + "metadata": { + "annotations": { + "crossplane.io/external-name": "{parent}-robot-{number}".format( + parent=xr["metadata"]["annotations"]["crossplane.io/external-name"], + number=i, + ) + }, + }, + "spec": { + "forProvider": { + # Starlark is deterministic so these Robots are always purple. :) + "color": "purple", + }, + }, + } + } + + return rsp ``` -A `FunctionIO` resource consists of the following top-level fields: - -* The `apiVersion` and `kind` (required). -* A `config` object (optional). This is a [Kubernetes resource][rawextension] - with an arbitrary schema that may be used to provide additional configuration - to a function. For example a `render-helm-chart` function might use its - `config` to specify which Helm chart to render. Functions need not return - their `config`, and any mutations will be ignored. -* An `observed` object (required). This reflects the observed state of the XR, - any existing composed resources, and their connection details. Functions must - return the `observed` object unmodified. -* A `desired` object (optional). This reflects the accumulated desired state of - the XR and any composed resources. Functions may mutate the `desired` object. -* A `results` array (optional). Used to communicate information about the result - of a function, including warnings and errors. Functions may mutate the - `results` object. - -Each function takes its `config` (if any), `observed` state, and any previously -accumulated `desired` state as input, and optionally mutates the `desired` -state. This allows the output of one function to be the input to the next. - -The `observed` object consists of: - -* `observed.composite.resource`. The observed XR. -* `observed.composite.connectionDetails`: The observed XR connection details. -* `observed.resources[N].name`: The name of an observed composed resource. -* `observed.resources[N].resource`: An observed composed resource. -* `observed.resources[N].connectionDetails`: An observed composed resource's - current connection details. - -If an observed composed resource appears in the Composition's `spec.resources` -array their `name` fields will match. Note that the `name` field is distinct -from a composed resource's `metadata.name` - it is used to identify the resource -within a Composition and/or its function pipeline. - -The `desired` object consists of: - -* `desired.composite.resource`. The desired XR. -* `desired.composite.resource.connectionDetails`. Desired XR connection details. -* `desired.resources[N].name`. The name of a desired composed resource. -* `desired.resources[N].resource`. A desired composed resource. -* `desired.resources[N].connectionDetails`. A desired composed resource's - connection details. -* `desired.resources[N].readinessChecks`. A desired composed resource's - readiness checks. - -Note that the `desired.resources` array of the `FunctionIO` type is very -similar to the `spec.resources` array of the `Composition` type. 
In comparison: - -* `name` works the same across both types, but is required by `FunctionIO`. -* `connectionDetails` and `readinessChecks` work the same across both types. -* `FunctionIO` does not support `base` and `patches`. Instead, a function should - configure the `resource` field accordingly. - -The `desired` state is _accumulated_ across the Composition and all of its -functions. This means the first function may be passed desired state as -specified by the `spec.resources` array of a Composite, if any, and each -function must include the accumulated desired state in its output. Desired state -is treated as an overlay on observed state, so a function pipeline need not -specify the desired state of the XR (for example) unless a function wishes to -mutate it. - -A full `FunctionIO` specification will accompany the implementation. Some -example scenarios are illustrated below. - -A function that wanted to create (compose) a `CloudSQLInstance` would do so by -returning the following `FunctionIO`: - -```yaml -apiVersion: apiextensions.crossplane.io/v1alpha1 -kind: FunctionIO -observed: {} # Omitted for brevity. -desired: - resources: - - name: cloudsqlinstance - resource: - apiVersion: database.gcp.crossplane.io/v1beta1 - kind: CloudSQLInstance - spec: - forProvider: - databaseVersion: POSTGRES_9_6 - region: us-central1 - settings: - tier: db-custom-1-3840 - dataDiskType: PD_SSD - dataDiskSizeGb: 20 - writeConnectionSecretToRef: - namespace: crossplane-system - name: cloudsqlpostgresql-conn - connectionDetails: - - name: hostname - fromConnectionSecretKey: hostname - readinessChecks: - - type: None +Starlark is a (very) limited dialect of Python designed for configuration. It +embeds into many languages, including Go. This makes it a great candidate to +build a generic Function that allows Composition authors to provide _inline_ +logic in a general-purpose-ish programming language. This is similar to the +["GitHub Script"][github-script] GitHub Action that lets you plug some arbitrary +logic into a GitHub Action pipeline when no existing Action does quite what you +need. + +#### Iterating on Compositions + +Getting fast feedback on a Crossplane Composition has historically been a pain +because the Composition logic has been encapsulated in a Kubernetes controller. +In order to know whether your Composition works as intended you need to: + +1. Deploy (the right version of!) Crossplane to a Kubernetes cluster. +2. Install Providers and Configurations for any resources you want to compose. +3. Apply your XRD and Composition (potentially via a Configuration). +4. Create an XR (or claim). +5. See whether it works. + +Moving Composition logic out of Crossplane and into versioned Functions makes it +a lot easier to test and iterate on, client-side. For example a `function test` +command could test a single Function in isolation: + +```shell +# Test a single Function by passing it a JSON RunFunctionRequest. This pulls and +# starts the Function, makes the request, stops it, then returns the result. +$ kubectl crossplane function test xpkg.upbound.io/negz/go-templates:v0.1.0 run-function-request.json +{ + # JSON encoded RunFunctionResponse omitted for brevity. 
+} ``` -A function that wanted to set only an XR connection detail could return: +You could similarly imagine a more holistic, `helm template`-like experience +that locally rendered an entire Composition by pulling and running its templates +locally: -```yaml -apiVersion: apiextensions.crossplane.io/v1alpha1 -kind: FunctionIO -observed: {} # Omitted for brevity. -desired: - composite: - connectionDetails: - - type: FromValue - name: username - value: admin +```shell +$ kubectl crossplane composition render composition.yaml xr.yaml +--- +# YAML stream omitted for brevity. ``` -A function wishing to delete a composed resource may do so by setting its -`resource` to null, for example: +Assuming a 'pure' Composition consisting only of Functions with no P&T +resources, this command would need only to iterate through the Composition's +`pipeline`, and for each Function: + +1. Pull and start it. +2. Form a `RunFunctionRequest` from the XR and any prior `RunFunctionResponse`. +3. Print the `desired` block of final `RunFunctionResponse` as a YAML stream. + +This isn't quite an end-to-end test of Composition. The composed resources are +not actually created and reconciled with an external system, so its not possible +for example to derive XR status from composed resource status. (Though that +could perhaps be simulated by providing fake observed resources.) It's also not +possible in isolation to determine whether the rendered composed resources are +schematically valid. It does however give you a good idea of whether your +Composition will produce the set of composed resources that you'd expect - just +like `helm template`. + +The advantage of Functions in this regard is that the `composition render` +command would need to duplicate much less code from the Crossplane Composition +controller than if it were to try to recreate Crossplane's hardcoded P&T logic. + +## Future Improvements + +The following functionality is out-of-scope for the beta implementation, but may +be added in future. + +### Function Input Custom Resources + +In the current alpha implementation of Functions, and this design, Function +input is a custom-resource-like inline resource (i.e. an +`x-kubernetes-embedded-resource`): ```yaml -apiVersion: apiextensions.crossplane.io/v1alpha1 -kind: FunctionIO -observed: {} # Omitted for brevity. -desired: - resources: - - name: cloudsqlinstance - resource: null + pipeline: + - step: compose-xr-using-go-templates + functionRef: + name: starlark + input: + apiVersion: example.org/v1 + kind: StarlarkScript + script: ... ``` -A function that could not complete successfully could do so by returning the -following `FunctionIO`: +In future it may be useful for a Function to be able to deliver this type as a +custom resource definition (CRD). This would allow a single input to be more +easily shared by multiple Compositions. A Composition could reference a Function +input custom resource: ```yaml -apiVersion: apiextensions.crossplane.io/v1alpha1 -kind: FunctionIO -config: - apiVersion: database.example.org/v1alpha1 - kind: Config - metadata: - name: cloudsql - spec: - version: POSTGRES_9_6 -observed: {} # Omitted for brevity. 
-results: -- severity: Error - message: "Could not render Database.postgresql.crossplane.io/v1beta1` + pipeline: + - step: compose-xr-using-go-templates + functionRef: + name: starlark + inputRef: + apiVersion: example.org/v1 + kind: StarlarkScript + name: make-some-purple-robots ``` -### Running Container Function Pipelines +At this stage I suggest holding off on building this functionality until there +is clear demand. -While Crossplane typically runs in a Kubernetes cluster - a cluster designed to -run containers - running an ordered _pipeline_ of short-lived containers via -Kubernetes is much less straightforward than you might expect. Refer to -[Alternatives Considered](#alternatives-considered) for details. +### Metrics and Tracing -In order to provide flexibility and choice of tradeoffs in running containers -(e.g. speed, scalability, security) this document proposes Crossplane defer -containerized functions to an external runner. Communication with the runner -would occur via a gRPC API, with the runner expected to be listening at the -`endpoint` specified via the function's `runner` configuration block. This -endpoint would default to `unix:///@crossplane/fn/default.sock` - an abstract -[Unix domain socket][unix-domain-sockets]. +Crossplane does not currently expose its own metrics. It relies on [the set +it gets from controller-runtime][controller-runtime-metrics], and metrics that +may be [derived from the Kubernetes events it emits][event-exporter-metrics]. -Communication between Crossplane and a containerized function runner would use -the following API: +These metrics pertain to Kubernetes controllers, and will be insufficient when a +significant portion of Composition logic is 'outsourced' to Functions. It will +be important to establish a pattern for instrumenting Functions (e.g. as part of +the Function SDKs). Request tracing in particular is likely to be useful in +order to debug slow or failing Functions in a pipeline. -```protobuf -syntax = "proto3"; +Metrics and tracing for Functions must be implemented before the feature becomes +generally available (GA). -// This service defines the APIs for a containerized function runner. -service ContainerizedFunctionRunner { - rpc RunFunction(RunFunctionRequest) returns (RunFunctionResponse) {} -} +### Caching -// Corresponds to Kubernetes' image pull policy. -enum ImagePullPolicy { - IF_NOT_PRESENT = 0; - ALWAYS = 1; - NEVER = 2; -} +The API proposed by this design accommodates caching. The `RunFunctionRequest` +includes a `meta.tag` field. The tag identifies a unique Function input. How the +tag is generated is up to the caller (i.e. Crossplane), but two functionally +identical Function inputs should have the same tag. A Function can optionally +signal that its response is valid for the same input for a period of time by +returning a non-zero `meta.ttl`. This allows Crossplane (or an intermediary such +as a reverse proxy) to cache the responses of deterministic and side-effect-less +Functions. -// Corresponds to go-containerregistry's AuthConfig type. -// https://pkg.go.dev/github.com/google/go-containerregistry@v0.11.0/pkg/authn#AuthConfig -message ImagePullAuth { - string username = 1; - string password = 2; - string auth = 3; - string identity_token = 4; - string registry_token = 5; -} +Building caching support is not in scope for beta. This will likely prove to be +a requirement for GA. 
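+
+To illustrate the intent, a caller-side cache could key entries on `meta.tag`
+and honor the returned `meta.ttl`. The sketch below is illustrative only and
+not part of this design - names such as `responseCache` are hypothetical, and
+the serialized response is assumed to have already been extracted from the
+gRPC message:
+
+```go
+package cache
+
+import (
+	"sync"
+	"time"
+)
+
+// responseCache is a hypothetical, caller-side cache of serialized
+// RunFunctionResponses, keyed by the meta.tag of the request that produced
+// them. A real cache could live in Crossplane itself or in a reverse proxy
+// deployed in front of a Function.
+type responseCache struct {
+	mu      sync.RWMutex
+	entries map[string]cacheEntry
+}
+
+type cacheEntry struct {
+	rsp     []byte    // A serialized RunFunctionResponse.
+	expires time.Time // Derived from the response's meta.ttl.
+}
+
+func newResponseCache() *responseCache {
+	return &responseCache{entries: make(map[string]cacheEntry)}
+}
+
+// Put caches a response for the supplied tag until its TTL elapses. Responses
+// without a tag, or with a zero TTL, are not cacheable.
+func (c *responseCache) Put(tag string, rsp []byte, ttl time.Duration) {
+	if tag == "" || ttl <= 0 {
+		return
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.entries[tag] = cacheEntry{rsp: rsp, expires: time.Now().Add(ttl)}
+}
+
+// Get returns the cached response for the supplied tag, if one is still fresh.
+func (c *responseCache) Get(tag string) ([]byte, bool) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	e, ok := c.entries[tag]
+	if !ok || time.Now().After(e.expires) {
+		return nil, false
+	}
+	return e.rsp, true
+}
+```
+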
-message ImagePullConfig { - ImagePullPolicy pull_policy = 1; - ImagePullAuth auth = 2; -} +### Dynamic Composed Resource Watches -// Containers are run without network access (in an isolated network namespace) -// by default. -enum NetworkPolicy = { - ISOLATED = 0; - ACCESSIBLE = 1; -} +Ideally XR reconciliation would be purely watch-triggered - this would result in +less work for the XR controller, and far fewer Function calls. -// Only resource limits are supported. Resource requests could be added in -// future if a runner supported them (e.g. by running containers on Kubernetes). -message Resources { - ResourceLimits limits = 1; -} +The XR controller currently watches the XR type, but is also poll-triggered, by +default polling desired state every 60 seconds. This interval can be changed by +the `--poll-interval` flag. The XR reconciler is poll-triggered because it wants +to know when composed resources change, in order to correct drift. -message ResourceLimits { - string memory = 1; - string cpu = 2; -} +An XR controller doesn't know what kinds of resources it will compose at start +time, which is typically when a controller's watches are configured. +Furthermore, two different XRs of the same kind might compose completely +different types of resources due to using different Compositions. -message RunFunctionConfig { - Resources resources = 1; - NetworkPolicy network = 2; - Duration timeout = 3; -} +This is something we should revisit regardless of Functions. For example it may +be possible to: -// The input FunctionIO is supplied as opaque bytes. -message RunFunctionRequest { - string image = 1; - bytes input = 2; - ImagePullConfig = 3; - RunFunctionConfig = 4; -} +1. Make one controller responsible (only) for selecting a Composition for an XR. +2. Start another controller for every unique (XR, Composition) tuple. +3. Restart the (XR, Composition) controller whenever the XR's + `spec.resourceRefs` changes, watching all referenced types. -// The output FunctionIO is supplied as opaque bytes. Errors encountered while -// running a function (as opposed to errors returned _by_ a function) will be -// encapsulated as gRPC errors. -message RunFunctionResponse { - bytes output = 1; -} -``` +### Runtime Configuration + +This design proposes Functions be long-running processes, installed by the +package manager. Deploying Functions as Kubernetes Deployments (with Services, +Service Accounts, etc) will no doubt necessitate something like a Provider's +ControllerConfig type. + +We've identified that we [don't want to proceed][controllerconfig-deprecation] +with ControllerConfig, but don't yet have a suitable alternative. Rather than +propagating a ControllerConfig-like pattern I propose we prioritize finding an +alternative. I intend to open a separate, simultaneous design to address this +since it will affect Provider packages as well as Functions. + +### Patch-and-Transform as a Function + +A key benefit of Functions is that composition logic is decoupled from the core +Crossplane release cycle. Moving composition logic out-of-tree and versioning it +separately from Crossplane allows us to iterate faster, and experiment more +freely. + +P&T style Composition could enjoy these benefits if it were not a special case, +and were just another Function. Imagine for example wanting a new type of patch +or a new type of transform and being able to simply fork the implementation to +experiment without affecting everyone who uses Crossplane. 
-### The Default Function Runner - -This document proposes that Crossplane include a default function runner. This -runner would be implemented as a sidecar to the core Crossplane container that -runs functions inside itself. - -The primary advantages of this approach are speed and control. There's no need -to wait for another system (for example the Kubernetes control plane) to -schedule each container, and the runner can easily pass stdout from one -container to another's stdin. Speed of function runs is of particular importance -given that each XR typically reconciles (i.e. invokes its function pipeline) -once every 60 seconds. - -The disadvantages of running the pipeline inside a sidecar container are scale -and reinvention of the wheel. The resources available to the sidecar container -will bound how many functions it can run at any one time, and it will need to -handle features that the Kubelet already offers such as pull secrets, caching -etc. - -[Rootless containers][rootless] appear to be the most promising way to run -functions as containers inside a container: - -> Rootless containers uses `user_namespaces(7)` (UserNS) for emulating fake -> privileges that are enough to create containers. The pseudo-root user gains -> capabilities such as `CAP_SYS_ADMIN` and `CAP_NET_ADMIN` inside UserNS to -> perform fake-privileged operations such as creating mount namespaces, network -> namespaces, and creating TAP devices. - -Using user namespaces allows the runner to use the other kinds of namespaces -listed above to ensure an extra layer of isolation for the functions it runs. -For example a network namespace could be configured to prevent a function having -network access. - -User namespaces are well supported by modern Linux Kernels, having been -introduced in Linux 3.8. Many OCI runtimes (including `runc`, `crun`, and -`runsc`) support rootless mode. `crun` appears to be the most promising choice -because: - -* It is more self-contained than `runc` (the reference and most commonly used - OCI runtime), which relies on setuid binaries to setup user namespaces. -* `runsc` (aka gVisor) uses extra defense in depth features which are not - allowed inside most containers due to their seccomp policies. - -Of course, "a container" is in fact many technologies working together and some -parts of rootless containers are less well supported than others; for example -cgroups v2 is required in order to limit resources like CPU and memory available -to a particular function. cgroups v2 has been available in Linux since 4.15, but -was not enabled by many distributions until 2021. In practice this means -Crossplane users must use a [sufficiently modern][cgroups-v2-distros] -distribution on their Kubernetes nodes in order to constrain the resources of a -Composition function. - -Similarly, [overlayfs] was not allowed inside user namespaces until Linux 5.11. -Overlayfs is typically used to create a root filesystem for a container that is -backed by a read-write 'upper' directory overlaid on a read-only 'lower' -directory. This allows the root OCI image filesystem to persist as a cache of -sorts, while changes made during the lifetime of a container can be easily -discarded. It's possible to replicate these benefits (at the expense of disk -usage and start-up time) by falling back to making a throwaway copy of the root -filesystem for each container run where overlayfs is not available. - -Under the approach proposed by this document each function run would involve the -following steps: - -1. 
Use [go-containerregistry] to pull the function's OCI image. -1. Extract (untar) the OCI image's flattened filesystem to disk. -1. Create a filesystem for the container - either an overlay or a copy of the - filesystem extracted in step 2. -1. Derive an [OCI runtime configuration][oci-rt-cfg] from the - [OCI image configuration][oci-img-cfg] supplied by go-containerregistry. -1. Execute `crun run` to invoke the function in a rootless container. - -Executing `crun` directly as opposed to using a higher level tool like `docker` -or `podman` allows the default function runner to avoid new dependencies apart -from a single static binary (i.e. `crun`). It keeps most functionality (pulling -images etc) inside the runner's codebase, delegating only container creation to -an external tool. Composition Functions are always short-lived and should always -have their stdin and stdout attached to the runner, so wrappers like -`containerd-shim` or `conmon` should not be required. The short-lived, "one -shot" nature of Composition Functions means it should also be acceptable to -`crun run` the container rather than using `crun create`, `crun start`, etc. - -At the time of writing rootless containers appear to be supported by Kubernetes, -including Amazon's Elastic Kubernetes Service (EKS) and Google Kubernetes Engine -(GKE). - -Testing using GKE 1.21.10-gke.2000 with Container Optimized OS (with containerd) -cos-89-16108-604-19 nodes (Kernel COS-5.4.170) found that it was possible to run -`unshare -rUm` (i.e. to create a new user and mount namespace) inside an Alpine -Linux container as long as AppArmor was disabled by applying the annotation -`container.apparmor.security.beta.kubernetes.io/${CONTAINER_NAME}=unconfined`. -It's possible to create user namespaces with AppArmor enabled, but not to create -mount namespaces with different mount propagation from their parent. - -It is not possible to use rootless containers with gVisor enabled, as gVisor -does not yet [support mount namespaces][gvisor-mountns]. This means that it is -not possible to use rootless containers with GKE Autopilot, which requires that -gVisor be used. - -Testing using EKS v1.21.5-eks-9017834 with Amazon Linux 2 nodes (Kernel -5.4.188-104.359.amzn2.x86_64) found that it was possible to run `unshare -rUm` -inside an Alpine Linux container 'out of the box'. - -The `unshare` syscall used to create containers is rejected by the default -Docker and containerd seccomp profiles. seccomp is disabled ("Unconstrained") by -default in Kubernetes, but that will soon change per [this KEP][kep-seccomp] -which proposes that Kubernetes use the seccomp profiles of its container engine -(i.e. containerd) by default. Once this happens Crossplane will either need to -run with the "Unconstrained" seccomp profile, or a variant of the default -containerd seccomp profile that allows a few extra syscalls (i.e. at least -`unshare` and `mount`). This can be done by setting a Pod's -`spec.securityContext.seccompProfile.type` field to `Unconstrained`. - -### Packaging Containerized Functions - -This document proposes that containerized functions support Crossplane [package -metadata][package-meta] in the form of a `package.yaml` file at the root of the -flattened filesystem and/or the OCI layer annotated as `io.crossplane.xpkg: -base` per the [xpkg spec][xpkg-spec]. This `package.yaml` file would contain a -custom-resource-like YAML document of type `Function.meta.pkg.crossplane.io`. 
- -Unlike `Configuration` and `Provider` packages, `Function` packages would not -actually be processed by the Crossplane package manager but rather by the -Composition (`apiextensions`) machinery. In practice Crossplane would be -ignorant of the `package.yaml` file; it would exist purely as a way to attach -"package-like" metadata to containerized Crossplane functions. Therefore, unlike -the existing package types the `package.yaml` would contain no `spec` section. - -An example `package.yaml` might look like: +P&T Composition as a Function might look like this: ```yaml -# Required. Must be as below. -apiVersion: meta.pkg.crossplane.io/v1alpha1 -# Required. Must be as below. -kind: Function -# Required. +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition metadata: - # Required. Must comply with Kubernetes API conventions. - name: function-example - # Optional. Must comply with Kubernetes API conventions. - annotations: - meta.crossplane.io/source: https://github.com/negz/example-fn - meta.crossplane.io/description: An example function + name: example +spec: + compositeTypeRef: + apiVersion: database.example.org/v1alpha1 + kind: AcmeCoDatabase + pipeline: + - step: patch-and-transform + functionRef: + name: patch-and-transform + input: + apiVersion: apiextensions.crossplane.io/v1 + kind: Resources + resources: + - name: cloudsqlinstance + base: + apiVersion: database.gcp.crossplane.io/v1beta1 + kind: CloudSQLInstance + spec: + forProvider: + databaseVersion: POSTGRES_9_6 + region: us-central1 + settings: + tier: db-custom-1-3840 + dataDiskType: PD_SSD + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.parameters.storageGB + toFieldPath: spec.forProvider.settings.dataDiskSizeGb ``` +Removing P&T support from the Composition type would be a breaking API change - +not something we could do easily. One approach could be to deprecate and +'freeze' the API, recommending folks who prefer P&T style use the generic P&T +Function instead. + ## Alternatives Considered -Most of the alternatives considered in this design could also be thought of as -future considerations. In most cases these alternatives don't make sense at the -present time but likely will in the future. - -### Using Webhooks to Run Functions - -Crossplane could invoke functions by calling a webhook rather than running an -OCI container. In this model function input and output would still take the form -of a `FunctionIO`, but would be HTTP request and response bodies rather than a -container's stdin and stdout. - -The primary detractor of this approach is the burden it puts on function authors -and Crossplane operators. Rather than simply publishing an OCI image the author -and/or Crossplane operator must deploy and operate a web server, ensuring secure -communication between Crossplane and the webhook endpoint. - -Support for `type: Webhook` functions will likely be added shortly after initial -support for `type: Container` functions is released. - -### Using chroots to Run Functions - -Crossplane could invoke functions packaged as OCI images by unarchiving them and -then running them inside a simple `chroot`. This offers more compatibility than -rootless containers at the expense of isolation - it's not possible to constrain -a chrooted function's compute resources, network access, etc. `type: Chroot` -functions would use the same artifacts as `type: Container` functions but invoke -them differently. 
- -Support for `type: Chroot` functions could be added shortly after initial -support for `type: Container` functions are released if `type: Container` proves -to be insufficiently compatible (e.g. for clusters running gVisor, or that -require seccomp be enabled). - -### Using Kubernetes to Run Containerized Functions - -Asking Kubernetes to run a container pipeline is less straightforward than you -might think. Crossplane could schedule a `Pod` for each XR reconcile, or create -a `CronJob` to do so regularly. Another option could be to connect directly to a -Kubelet. This approach would enjoy all the advantages of the existing Kubelet -machinery (pull secrets, caching, etc) but incurs overhead in other areas, for -example: - -* Every reconcile requires a pod to be scheduled, which may potentially block on - node scale-up, etc. -* stdin and stdout must be streamed via the API server, for example by using the - [`/attach` subresource][attach]. -* Running containers in order requires either (ab)using init containers or - injecting a middleware binary that blocks container starts to ensure they run - in order (similar to Argo Workflow's '[emissary]' executor): - -> The emissary works by replacing the container's command with its own command. -> This allows that command to capture stdout, the exit code, and easily -> terminate your process. The emissary can also delay the start of your process. - -You can see some of the many options Argo Workflows explored to address these -issues before landing on `emissary` in their list of -[deprecated executors][argo-deprecated-executors]. - -### Using KRM Function Spec Compliant Functions - -While the design proposed by this document is heavily inspired by KRM Functions, -the [KRM function specification][krm-fn-spec] as it currently exists is not an -ideal fit. This is because: - -1. It is built around the needs of CLI tooling - including several references to - (client-side) 'files' that don't exist in the Crossplane context. -1. Crossplane needs additional metadata to distinguish which resource in the - `ResourceList` is the composite resource and which are the composed - resources. - -### gVisor - -[gVisor][gvisor] supports rootless mode, but requires too many privileges to run -in a container. A proof-of-concept [exists][gvisor-unpriv] to add an -`--unprivileged` flag to gVisor, allowing it to run inside a container. It's -unlikely that gVisor will work in all situations in the near future - for -example gVisor cannot currently run inside gVisor and support for anything other -than x86 architectures is experimental. +See the [alpha design document][alpha-design]. + + [term-composition]: https://crossplane.io/docs/v1.9/concepts/terminology.html#composition [v0.10.0]: https://github.com/crossplane/crossplane/releases/tag/v0.10.0 @@ -752,27 +1139,29 @@ than x86 architectures is experimental. 
[declarative-app-management]: https://docs.google.com/document/d/1cLPGweVEYrVqQvBLJg6sxV-TrE5Rm2MNOBA_cxZP2WU/edit [bcl]: https://twitter.com/bgrant0607/status/1123620689930358786?lang=en [terraform-count]: https://www.terraform.io/language/meta-arguments/count -[turing-complete]: https://en.wikipedia.org/wiki/Turing_completeness#Unintentional_Turing_completeness -[pitfalls-dsl]: https://github.com/kubernetes/community/blob/8956bcd54dc6f99bcb681c79a7e5399289e15630/contributors/design-proposals/architecture/declarative-application-management.md#pitfalls-of-configuration-domain-specific-languages-dsls -[controller-runtime]: https://github.com/kubernetes-sigs/controller-runtime -[krm-fn-spec]: https://github.com/kubernetes-sigs/kustomize/blob/9d5491/cmd/config/docs/api-conventions/functions-spec.md -[rawextension]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#rawextension -[unix-domain-sockets]: https://man7.org/linux/man-pages/man7/unix.7.html -[rootless]: https://rootlesscontaine.rs/how-it-works/userns/ -[cgroups-v2-distros]: https://rootlesscontaine.rs/getting-started/common/cgroup2/#checking-whether-cgroup-v2-is-already-enabled -[overlayfs]: https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html -[go-containerregistry]: https://github.com/google/go-containerregistry -[oci-rt-cfg]: https://github.com/opencontainers/runtime-spec/blob/v1.0.2/config.md -[oci-img-cfg]: https://github.com/opencontainers/image-spec/blob/v1.0.2/config.md -[gvisor-mountns]: https://github.com/google/gvisor/issues/221 -[kep-seccomp]: https://github.com/kubernetes/enhancements/issues/2413 -[package-meta]: https://github.com/crossplane/crossplane/blob/035e77b/design/one-pager-package-format-v2.md -[xpkg-spec]: https://github.com/crossplane/crossplane/blob/035e77b/docs/reference/xpkg.md -[attach]: https://github.com/kubernetes/kubectl/blob/18a531/pkg/cmd/attach/attach.go -[emissary]: https://github.com/argoproj/argo-workflows/blob/702b293/workflow/executor/emissary/emissary.go#L25 -[argo-deprecated-executors]: https://github.com/argoproj/argo-workflows/blob/v3.4.1/docs/workflow-executors.md -[krm-fn-spec]: https://github.com/kubernetes-sigs/kustomize/blob/9d5491/cmd/config/docs/api-conventions/functions-spec.md -[krm-fn-runtimes]: https://github.com/GoogleContainerTools/kpt/issues/2567 -[krm-fn-catalog]: https://catalog.kpt.dev -[gvisor]: https://gvisor.dev -[gvisor-unpriv]: https://github.com/google/gvisor/issues/4371#issuecomment-700917549 \ No newline at end of file +[alpha-design]: defunct/design-doc-composition-functions.md +[#1972]: https://github.com/crossplane/crossplane/pull/1972 +[#2352]: https://github.com/crossplane/crossplane/pull/2352 +[#4051]: https://github.com/crossplane/crossplane/pull/4051 +[#3917]: https://github.com/crossplane/crossplane/pull/3917 +[#3919]: https://github.com/crossplane/crossplane/pull/3919 +[#3989]: https://github.com/crossplane/crossplane/pull/3989 +[#3498]: https://github.com/crossplane/crossplane/pull/3498 +[#3458]: https://github.com/crossplane/crossplane/pull/3458 +[#3316]: https://github.com/crossplane/crossplane/pull/3316 +[#4036]: https://github.com/crossplane/crossplane/issues/4036 +[#4065]: https://github.com/crossplane/crossplane/issues/4065 +[#4026]: https://github.com/crossplane/crossplane/issues/4026 +[google-protobuf-struct]: https://protobuf.dev/reference/protobuf/google.protobuf/#struct +[cert-per-entity]: https://github.com/crossplane/crossplane/issues/4305 +[#3884]: 
https://github.com/crossplane/crossplane/pull/3884 +[go-grpc-concurrency]: https://github.com/grpc/grpc-go/blob/master/Documentation/concurrency.md#servers +[grpc-benchmarks]: https://grpc.io/docs/guides/benchmarking/ +[functionio-schema]: https://github.com/crossplane/crossplane/blob/v1.12.2/apis/apiextensions/fn/io/v1alpha1/functionio_types.go#L28 +[kubecon-eu-contribfest]: https://github.com/crossplane-contrib/contribfest/blob/main/lab-composition-functions/xfn-many/main.go +[grpc-supported-languages]: https://grpc.io/docs/languages/ +[starlark]: https://github.com/bazelbuild/starlark/blob/master/spec.md +[github-script]: https://github.com/actions/github-script +[controller-runtime-metrics]: https://book.kubebuilder.io/reference/metrics-reference.html +[event-exporter-metrics]: https://github.com/caicloud/event_exporter +[controllerconfig-deprecation]: https://github.com/crossplane/crossplane/issues/2468 \ No newline at end of file diff --git a/design/design-doc-observe-only-resources.md b/design/design-doc-observe-only-resources.md index 01067c4c3..04ba953ec 100644 --- a/design/design-doc-observe-only-resources.md +++ b/design/design-doc-observe-only-resources.md @@ -2,7 +2,7 @@ * Owners: Hasan Turken (@turkenh) * Reviewers: Crossplane Maintainers -* Status: Draft +* Status: Accepted ## Background @@ -82,6 +82,10 @@ the desired state and the observed state of the resource and when the updated with the latest observation of the resource. ### Management Policy +> [!NOTE] +> The management policy was significantly changed in a +subsequent design for [ignore changes]. Keeping this section for historical +purposes. To support observing resources without taking ownership, we will introduce a new spec named `managementPolicy` to the Managed Resources. We will also deprecate @@ -713,3 +717,4 @@ it as a migration path to Crossplane. [desired tags]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc#tags [passing sensitive configuration]: https://github.com/crossplane/crossplane/pull/2886#discussion_r862615416 [`type: Webhook` composition function]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-composition-functions.md#using-webhooks-to-run-functions +[ignore changes]: https://github.com/crossplane/crossplane/blob/ad0ff7d6d0e4850168883905ed8e1509089cea15/design/one-pager-ignore-changes.md \ No newline at end of file diff --git a/design/images/usage-owner-ref.png b/design/images/usage-owner-ref.png new file mode 100644 index 000000000..14d2be2e3 Binary files /dev/null and b/design/images/usage-owner-ref.png differ diff --git a/design/one-pager-generic-usage-type.md b/design/one-pager-generic-usage-type.md new file mode 100644 index 000000000..19d6f2680 --- /dev/null +++ b/design/one-pager-generic-usage-type.md @@ -0,0 +1,498 @@ +# Generic `Usage` Type for Deletion Ordering + +* Owner: Hasan Türken (@turkenh) +* Reviewers: @bobh66, @negz +* Status: Draft + +## Background + +Crossplane is built on Kubernetes, which leans into eventual consistency. When +several things need to happen in a particular order the "Kubernetes way" is to +specify the desired end state and rely on the relevant controllers to ensure the +system eventually arrives at that state, largely through the use of constant +reconciliation (aka retries). + +Take for example the creation of a VPC Network and a Subnet. The VPC must be +created and ready before a Subnet can be created within it. 
In Kubernetes, it’s +typical to request both resources be created at once (thereby declaring the +desired state). The creation of the Subnet will simply fail until its +dependency - the VPC - becomes available. + +This loosely coupled, eventually consistent approach is simple and resilient, +but it can appear chaotic. Operations must be attempted more times than would be +necessary in a "smarter" system, and often the temporarily failing operations +will emit warnings that may be confusing or concerning to the uninitiated. Some +other systems - notably Terraform - avoid this by computing a dependency graph +before attempting to enact the desired state. This allows the system to avoid +trying operations it knows will fail due to missing dependencies until those +dependencies are expected to be satisfied. + +A frequent negative side-effect of the eventually consistent approach that we +see in Crossplane is resources becoming orphaned at delete time because their +successful deletion depends on the existence of other resources which are often +deleted before their dependents. It’s possible to author a set of interdependent +resources that will eventually create successfully, but may not eventually +delete successfully. + +Two variants of this problem are: + +- **Orphaned Managed Resources (MRs).** Consider a Helm Release MR that is +deployed to an EKS Cluster MR. The Helm Release has a dependency on the EKS +Cluster’s connection details - it uses them as Provider credentials. If the EKS +Cluster is deleted before the Helm Release the Helm Release controller becomes +unable to connect to its Provider in order to delete the Release. In practice +the Release External Resource (ER) was implicitly deleted along with the EKS +Cluster - there’s actually no work left for the Helm release controller to do - +but it cannot know that. This results in the Helm Release MR being ‘orphaned’ +in a pending deletion that can never be satisfied. + +- **Orphaned ‘Side-Effect’ Resources.** Consider the same example as above, and +assume the Helm Release includes a Kubernetes Service resource of type: +LoadBalancer. A side-effect of the creation of this Service will be to create an +ELB in the EKS cluster’s VPC. This ELB is not managed by or even known to +Crossplane, and is not deleted along with the EKS cluster. This means that the +Helm Release (and therefore its Service) must be deleted before the EKS cluster +in order to ensure the EKS cluster’s controllers trigger deletion of the ELB. +If it is not, the ELB will be orphaned, costing money and blocking the deletion +of the VPC it was created in. + +These problems are exacerbated by the fact that Kubernetes uses "background +cascading deletion" by default. Put otherwise, the successful deletion of a +parent resource is not dependent on the successful deletion of its children. +Instead, the parent resource will be deleted successfully and the children +(i.e. resources that declare the parent as an owner reference) will later be +deleted by the Kubernetes garbage collector. This makes it difficult to diagnose +or even notice resources that become orphaned when their parent XR is deleted. + +## Goals + +- Support "cross-provider" dependencies, for example an MR from provider-helm +depending on a ProviderConfig from provider-aws. +- Don’t require Crossplane users to manage fine-grained RBAC access. +- Don’t grant Crossplane providers broad RBAC access (e.g. to all other providers). 
+ +## Proposal + +We propose introducing a new `Usage` type that can be used to declare usage +relationships between Crossplane resources. This type will be defined by the +Core Crossplane repository, and also be available for use as part of a +composition. + +The relations defined by this type will be enforced by an admission webhook that +will be running as part of the Crossplane core. Similar to the upstreams [liens +proposal], the webhook will reject deletions of resources that are in use by +other resources with a ["409 Conflict" error code] as `metav1.Status` as part of +the admission response (not the actual http code of the admission request). + +The RBAC manager already grants Crossplane RBAC access to all types in order for +it to do Composition. Therefore, there's no need for extra RBAC permissions to +interact with resources across providers. + +### API + +We will introduce a new `Usage` type as follows: + +```yaml +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: Usage +metadata: + name: release-uses-cluster +spec: + # Reason is optional when this Usage defines a "Dependency" type relationship, + # i.e. when the `spec.by` field is defined. + # It is required when the Usage is meant to be used for "Protection" purposes, + # i.e. when the `spec.by` is NOT defined. + reason: "Release uses Cluster" + # Reference to the resource that is being used. + of: + apiVersion: eks.upbound.io/v1beta1 + kind: Cluster + resourceRef: + name: my-cluster + # Reference to the resource that is using the other resource. + # This field is optional and can be omitted when the Usage is meant to be used + # for "Protection" purposes. + by: + apiVersion: helm.crossplane.io/v1beta1 + kind: Release + resourceRef: + name: my-prometheus-chart +``` + +The `spec.by` field will define the resource that will be using the resource +defined in `spec.of`. Both will support only cluster-scoped resources, namely +`Composites` and `Managed Resources`. + +In this example, we define a usage relationship between a Helm `Release` and an +EKS `Cluster`. The deletion of the `Cluster` resource will be rejected as long +as this `Release` resource exists. + +In addition to direct referencing with names, we will also support selectors to +match labels or controller reference. This will allow us to define usage +relationships between resources created by the same composition instance. + +```yaml +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: Usage +metadata: + name: release-uses-cluster +spec: + of: + apiVersion: eks.upbound.io/v1beta1 + kind: Cluster + resourceSelector: + matchControllerRef: true + matchLabels: + foo: bar + by: + apiVersion: helm.crossplane.io/v1beta1 + kind: Release + resourceSelector: + matchControllerRef: true + matchLabels: + baz: qux +``` + +Another use case for `Usage` is to protect a resource from being deleted without +necessarily being used by another resource. For example, a `Usage` that will +prevent the deletion of a database instance could be defined as follows: + +```yaml +apiVersion: crossplane.io/v1 +kind: Usage +spec: + reason: "Production Database - should never be deleted" + of: + apiVersion: rds.aws.upbound.io/v1beta1 + kind: Instance + resourceRef: + name: my-cluster +``` + +### Implementation + +The implementation of this feature will be done in two parts: + +1. The `Usage` type and its controller, which will manage the lifecycle of the + resource. +2. The admission webhook that will enforce the usage relationships. 
+
+Both will be implemented in the Crossplane core repository and would not require
+any changes in the providers.
+
+#### Usage Controller
+
+The usage controller will be responsible for managing the lifecycle of the usage
+relationship by:
+
+- Resolving selectors, if any.
+- Adding an owner reference from the using resource (i.e., `spec.by`) to the `Usage`.
+- Adding/removing an owner reference from the `Usage` resource to the used resource (i.e., `spec.of`).
+- Preventing deletion of the `Usage` before the using resource is deleted.
+- Adding/removing the `crossplane.io/in-use: true` label to ensure the `DELETE`
+  request is intercepted by matching the selector in the admission webhook rule.
+
+**Resolving selectors:**
+
+The API will support defining selectors for both the using and the used
+resource. Initially, we will only support resolving references with the
+following rules:
+
+- Resolution will be made once, when the `Usage` is created.
+- If multiple resources match the selector, a random one will be selected.
+
+Once selectors are resolved, the controller will fill in `resourceRef.name`.
+
+Example:
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1alpha1
+kind: Usage
+metadata:
+  name: release-uses-clusters
+spec:
+  of:
+    apiVersion: eks.upbound.io/v1beta1
+    kind: Cluster
+    resourceSelector:
+      matchControllerRef: true
+      matchLabels:
+        foo: bar
+  by:
+    apiVersion: helm.crossplane.io/v1beta1
+    kind: Release
+    resourceRef:
+      name: my-prometheus-chart
+```
+
+to:
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1alpha1
+kind: Usage
+metadata:
+  name: release-uses-clusters
+spec:
+  of:
+    apiVersion: eks.upbound.io/v1beta1
+    kind: Cluster
+    resourceRef: # added by the controller based on the selectors
+      name: my-cluster # added by the controller based on the selectors
+    resourceSelector:
+      matchControllerRef: true
+      matchLabels:
+        foo: bar
+  by:
+    apiVersion: helm.crossplane.io/v1beta1
+    kind: Release
+    resourceRef:
+      name: my-prometheus-chart
+```
+
+The [existing policies] for resolving Managed Resource references do not
+necessarily apply to the `Usage`. For example, supporting an `optional`
+reference while defining a usage relationship does not make sense. Also, the
+[reasoning behind] supporting different policies for MRs does not apply to
+the `Usage`. However, depending on the feedback, we may consider supporting
+resolve policies to configure whether the resolution should be made once or
+continuously in a future iteration.
+
+**Owner references:**
+
+We want the `Usage` to be deleted when the using resource is deleted. This will
+be achieved by adding an owner reference from the using resource to the
+`Usage`, as sketched below.
+
+Another owner reference will be added from the `Usage` to the used resource to
+prevent the Garbage Collector from attempting to delete the used resource
+before the `Usage` is deleted. This is mostly a mitigation for the case where
+the deletion process of composites, which relies on GC, could take too long
+because of the exponential backoff in the Garbage Collector as a result of
+failed DELETE API calls.
+
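+For illustration only, a resolved `Usage` from the earlier example might look
+roughly like the sketch below once the controller has added the owner
+reference to the using `Release`. The exact layout is not part of this
+proposal, and the `uid` is a placeholder:
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1alpha1
+kind: Usage
+metadata:
+  name: release-uses-cluster
+  ownerReferences:
+  # Lets the garbage collector delete this Usage once the Release is gone.
+  - apiVersion: helm.crossplane.io/v1beta1
+    kind: Release
+    name: my-prometheus-chart
+    uid: 00000000-0000-0000-0000-000000000000 # Placeholder.
+spec:
+  of:
+    apiVersion: eks.upbound.io/v1beta1
+    kind: Cluster
+    resourceRef:
+      name: my-cluster
+  by:
+    apiVersion: helm.crossplane.io/v1beta1
+    kind: Release
+    resourceRef:
+      name: my-prometheus-chart
+```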
+
+**Preventing early deletion of the `Usage`:**
+
+Typically, we expect a `Usage` resource to be defined as part of the same
+composition as the using and used resources. This means the `Usage` will get
+the delete request when the composite resource is deleted. In this case, the
+controller will prevent the deletion of the `Usage` until the using resource is
+deleted, with the help of a finalizer.
+
+**Adding/removing the `crossplane.io/in-use: true` label:**
+
+The admission webhook will intercept the `DELETE` requests for the used
+resources with the following admission webhook rule:
+
+```yaml
+webhooks:
+- name: nousages.apiextensions.crossplane.io
+  objectSelector:
+    matchLabels:
+      crossplane.io/in-use: "true"
+  rules:
+  - operations: ["DELETE"]
+    apiGroups: ["*"]
+    apiVersions: ["*"]
+    resources: ["*"]
+    scope: "*"
+```
+
+To ensure any delete request for a resource that is in use is intercepted by
+our webhook, the controller will add the `crossplane.io/in-use: true` label to
+the used resource when a `Usage` is created. It is also the controller's job to
+remove this label when the `Usage` is deleted and no other `Usage` resources
+reference the same used resource.
+
+#### Admission Webhook
+
+The admission webhook will be responsible for intercepting the `DELETE` requests
+for the used resource(s) and rejecting them with a ["409 Conflict" error code].
+We will fail closed in case of any errors from the webhook, by setting
+[`failurePolicy: Fail`].
+
+The webhook will be implemented as a Kubernetes validating admission webhook,
+leveraging the [existing webhook machinery] in Crossplane, and will run as part
+of the Crossplane core.
+
+We will use [Field Indexers] to efficiently query the `Usage`s that reference a
+used resource when a `DELETE` request is received.
+
+Implementation of the core logic for the webhook handler:
+
+```go
+func (h *handler) validateNoUsages(ctx context.Context, u *unstructured.Unstructured) admission.Response {
+	usageList := &v1alpha1.UsageList{}
+	if err := h.reader.List(ctx, usageList, client.MatchingFields{inUseIndexKey: getIndexValueForObject(u)}); err != nil {
+		return admission.Errored(http.StatusInternalServerError, err)
+	}
+	if len(usageList.Items) > 0 {
+		return admission.Response{
+			AdmissionResponse: admissionv1.AdmissionResponse{
+				Allowed: false,
+				Result: &metav1.Status{
+					Code:   int32(http.StatusConflict),
+					Reason: metav1.StatusReason(fmt.Sprintf("The resource is used by %d resource(s), including %s/%s", len(usageList.Items), usageList.Items[0].Spec.By.Kind, usageList.Items[0].Spec.By.Name)),
+				},
+			},
+		}
+	}
+	return admission.Allowed("")
+}
+```
+
+### User Experience
+
+#### Defining a Usage Resource as part of a Composition
+
+We expect a typical usage relationship to be defined as part of the same
+composition as the using and used resources. In this case, the user will
+define the `Usage` resource as part of the composition template.
+
+Let's take [upbound/platform-ref-aws] as an example. It contains a
+[composition for `XCluster`] that composes an `XEKS` (an EKS cluster) and an
+`XServices` (a set of Helm releases running on that cluster).
+In this case, we will add the following resource to the same composition to
+define a usage relationship between the `XServices` and the `XEKS` Composites:
+
+```yaml
+  - name: xservices-uses-xeks
+    base:
+      apiVersion: apiextensions.crossplane.io/v1alpha1
+      kind: Usage
+      spec:
+        of:
+          apiVersion: aws.platformref.upbound.io/v1alpha1
+          kind: XEKS
+          resourceSelector:
+            matchControllerRef: true
+        by:
+          apiVersion: aws.platformref.upbound.io/v1alpha1
+          kind: XServices
+          resourceSelector:
+            matchControllerRef: true
+```
+
+#### Deleting a Claim or Composite which contains a Usage Relationship
+
+**With the (default) background propagation policy**, we expect no difference in
+user experience. Users will be able to delete the Claim or Composite without any
+error. The Claim or Composite will be deleted immediately, and the Kubernetes
+garbage collector will delete the Composed Resources together with the `Usage`
+resources in the background. It will take longer until all Composed Resources
+are garbage collected; however, this will not be visible to the user.
+
+**With the foreground propagation policy**, the only difference we expect is
+that deletion takes longer than before. The user will be able to delete the
+Claim or Composite without any error. The deletion of the Claim or Composite
+will be blocked until all Composed Resources are deleted, as before. It will
+take longer because deletion of the used resource is blocked until the using
+resource, and hence the `Usage`, is deleted.
+
+#### Directly Deleting a Composite or Managed Resource that is in Use
+
+When trying to delete a Composite or managed resource directly, users will get
+an error as follows:
+
+```bash
+Error from server (The resource is used by Release/my-release): error when deleting "my-cluster.yaml": admission webhook "nousages.apiextensions.crossplane.io" denied the request: The resource is used by Release/my-release
+```
+
+From the error message, it is clear that the resource is in use by a `Release`
+resource. The user can then delete the `Release` resource and then delete the
+Composite or managed resource again.
+
+## Alternatives Considered
+
+### Prior Art
+
+Several alternatives were considered in this [previous document], namely:
+
+- Assume Implicit Deletion
+- In-use Liens
+- Composition-internal Dependencies
+- Composition-ordered Dependencies
+- Introduce a Global Dependency Object
+- Introduce a Scoped Dependency Object
+- Model Dependencies in Spec
+- Dependent-managed Finalizers
+
+Please see that document for further details on each of these alternatives.
+
+Among these alternatives, this proposal focuses on the
+"Introduce a Global Dependency Object" option, but with some implementation
+differences, which are explained in detail in the following section.
+
+### Alternative Implementations with a Global `Usage` Object
+
+There are three distinct types of resources that we want to block the deletion
+of when used by other resources:
+
+1. A Managed Resource.
+   - We control the deletion logic; we have the most power.
+   - We can use an annotation or finalizer (or whatever we want) and handle the
+     logic in the managed reconciler for all managed resources, e.g., do not
+     delete the external resource if the managed resource is in use.
+2. A Composite Resource with the (default) background deletion policy.
+   - We rely on the garbage collector for deletion. Composed Resources get the
+     deletion request only after the Composite has disappeared from the API.
+   - So, we can still rely on a finalizer-based approach to block the deletion
+     of the whole tree of owned (Composed) Resources.
+3. A Composite Resource, but with the foreground deletion policy.
+   - We still rely on the garbage collector for deletion; however, Composed
+     Resources get the deletion request immediately after the Composite gets
+     the deletion request, no matter whether it has additional finalizers or
+     not.
+   - Finalizer-based approaches wouldn't work out of the box.
+
+So, we have three possible solutions to cover all the scenarios:
+
+**Option A:** Block the delete request with the help of a webhook in any case.
+
+**Option B:** When there is a dependency on a Composite Resource, resolve that
+dependency down to all Composed Resources and only implement deletion prevention
+logic for the Managed Resources.
+
+**Option C:** For Composite resources, do not rely on the garbage collector by
+removing owner references to delete Composed Resources and implement our custom
+deletion logic.
+
+Option C is not ideal because it requires us to implement our own custom
+deletion logic for Composites, and will break tools that use owner references
+(e.g. ArgoCD) to track the relationship between Composites and Composed
+Resources.
+
+If we try to make a comparison between the other two approaches:
+
+| | Option A: Webhook & Reject Delete Requests | Option B: Delay Deletion Until No Usage with Resolved `Usage`s |
+|-------------------|--------------------------------------------|----------------------------------------|
+| Implementation Complexity | Easier to implement, with fewer complexities to handle during development. | More complex to implement due to the need to _continuously_ resolve dependencies on XRs down to nested XRs and MRs. |
+| User Experience | Provides immediate feedback, but users need to manually retry deletion after all usages are gone. | Users do not need to manually retry deletions, which may be smoother, but the delay in deletion might be confusing if not properly communicated. |
+| Alignment with the Kubernetes Philosophy | Aligned with Kubernetes' Liens KEP (still open), which advocates for proactive protection against deletion. | Aligned with Kubernetes' philosophy of eventual consistency and the existing behavior of PV/PVC protection and Crossplane's ProviderConfigUsage. |
+| Scalability | Scales better since it will require one `Usage` per XR. | Ends up with multiple `Usage`s per XR and can grow quickly when there are nested XRs or multiple dependencies on the same parent. |
+| User Error Protection | The upfront rejection of deletion requests can prevent accidental deletion when there are still usages. | While deletion will eventually occur when all usages are gone, there could be a risk of unintended consequences during the delay period. |
+| Debugging/Troubleshooting | Easier to debug since only `Usage`s created by the user exist, and resources won't have a deletion timestamp. | More difficult to debug since additional `Usage`s are created by the controllers, and it is not easy to identify whether a resource is being deleted or waiting for dependencies to be gone at a given moment. |
+| Ease of Rollout | Easier; only requires changes in the XP repo. | Requires changes in XP and the runtime, and providers need to be updated. |
+
+Especially considering that there is no clear agreement upstream, we
+believe _it could be good to choose a practical approach and start with the
+simpler solution, namely the webhook-based solution_.
We think it's better to +start with the simple option, and then, if we find out that it's not enough, we +can think about changing to a more complex solution. If we check the above table, +it is clear that the webhook-based solution is the simpler one. + +If we decide to change the underlying implementation later, we don't expect +an API change except getting rejected deletes vs. not. This also provides us a +good opportunity to test the webhook-based solution in the field and gather +feedback from the community before committing to a more complex solution. + +[liens proposal]: https://github.com/kubernetes/enhancements/pull/2840 +["409 Conflict" error code]: https://datatracker.ietf.org/doc/html/rfc2616#section-10.4.10 +[existing policies]: https://github.com/crossplane/crossplane-runtime/blob/23eaff94e7385121bca832955c8885f925f55ae6/apis/common/v1/resource.go#L80 +[reasoning behind]: https://github.com/crossplane/crossplane-runtime/pull/328 +[webhook rules]: https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-rules +[`failurePolicy: Fail`]: https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy +[existing webhook machinery]: https://github.com/crossplane/crossplane/pull/2919 +[Field Indexers]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client#FieldIndexer +[previous document]: https://docs.google.com/document/d/1Yu3GxNMJOOMWMjD6gEw5QtT8Fz5qQSyo18ZZFKqcqKw +[upbound/platform-ref-aws]: https://github.com/upbound/platform-ref-aws/tree/v0.6.0 +[composition for `XCluster`]: https://github.com/upbound/platform-ref-aws/blob/v0.6.0/package/cluster/composition.yaml \ No newline at end of file diff --git a/design/one-pager-ignore-changes.md b/design/one-pager-ignore-changes.md index 1bfb5e81a..d0eccc0ca 100644 --- a/design/one-pager-ignore-changes.md +++ b/design/one-pager-ignore-changes.md @@ -2,7 +2,7 @@ * Owners: Lovro Sviben (@lsviben) * Reviewers: @turkenh, @negz -* Status: Draft +* Status: Accepted ## Background diff --git a/go.mod b/go.mod index c39d75acd..16c9799a7 100644 --- a/go.mod +++ b/go.mod @@ -7,11 +7,11 @@ require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 github.com/Masterminds/semver v1.5.0 github.com/alecthomas/kong v0.8.0 - github.com/bufbuild/buf v1.22.0 - github.com/crossplane/crossplane-runtime v0.20.0-rc.0.0.20230622044456-2dfb8bc6bfbc + github.com/bufbuild/buf v1.25.1 + github.com/crossplane/crossplane-runtime v0.20.1 github.com/cyphar/filepath-securejoin v0.2.3 github.com/google/go-cmp v0.5.9 - github.com/google/go-containerregistry v0.15.2 + github.com/google/go-containerregistry v0.16.1 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230617045147-2472cbbbf289 github.com/google/uuid v1.3.0 github.com/jmattheis/goverter v0.17.4 @@ -20,10 +20,10 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.9.5 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.9.0 - google.golang.org/grpc v1.56.1 + golang.org/x/sys v0.11.0 + google.golang.org/grpc v1.57.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 - google.golang.org/protobuf v1.30.0 + google.golang.org/protobuf v1.31.0 k8s.io/api v0.27.3 k8s.io/apiextensions-apiserver v0.27.3 k8s.io/apimachinery v0.27.3 @@ -32,11 +32,14 @@ require ( k8s.io/utils v0.0.0-20230505201702-9f6742963106 kernel.org/pub/linux/libs/security/libcap/cap v1.2.69 sigs.k8s.io/controller-runtime v0.15.0 - sigs.k8s.io/controller-tools v0.12.0 + sigs.k8s.io/controller-tools 
v0.12.1 sigs.k8s.io/e2e-framework v0.2.0 + sigs.k8s.io/kind v0.20.0 sigs.k8s.io/yaml v1.3.0 ) +require google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + require ( cloud.google.com/go/compute v1.19.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect @@ -67,9 +70,9 @@ require ( github.com/aws/smithy-go v1.13.5 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bufbuild/connect-go v1.8.0 // indirect - github.com/bufbuild/connect-opentelemetry-go v0.3.0 // indirect - github.com/bufbuild/protocompile v0.5.1 // indirect + github.com/bufbuild/connect-go v1.9.0 // indirect + github.com/bufbuild/connect-opentelemetry-go v0.4.0 // indirect + github.com/bufbuild/protocompile v0.6.0 // indirect github.com/cenkalti/backoff/v3 v3.0.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect @@ -78,10 +81,10 @@ require ( github.com/dave/jennifer v1.6.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/cli v24.0.2+incompatible // indirect + github.com/docker/cli v24.0.4+incompatible // indirect github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/docker v24.0.2+incompatible // indirect - github.com/docker/docker-credential-helpers v0.7.0 // indirect + github.com/docker/docker v24.0.4+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.10.2 // indirect @@ -90,7 +93,7 @@ require ( github.com/fatih/color v1.15.0 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-chi/chi/v5 v5.0.8 // indirect + github.com/go-chi/chi/v5 v5.0.10 // indirect github.com/go-jose/go-jose/v3 v3.0.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -107,7 +110,7 @@ require ( github.com/google/gnostic v0.6.9 // indirect github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect + github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.0.0 // indirect @@ -125,7 +128,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.16.6 // indirect + github.com/klauspost/compress v1.16.7 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -140,7 +143,7 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc3 // indirect + github.com/opencontainers/image-spec v1.1.0-rc4 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect 
github.com/pkg/profile v1.7.0 // indirect github.com/prometheus/client_golang v1.15.1 // indirect @@ -152,7 +155,7 @@ require ( github.com/ryanuber/go-glob v1.0.0 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tetratelabs/wazero v1.2.1 // indirect + github.com/tetratelabs/wazero v1.3.1 // indirect github.com/vbatts/tar-split v0.11.3 // indirect github.com/vladimirvivien/gexe v0.2.0 // indirect go.opentelemetry.io/otel v1.16.0 // indirect @@ -162,17 +165,16 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.10.0 // indirect - golang.org/x/mod v0.11.0 // indirect - golang.org/x/net v0.11.0 // indirect; indirect // indirect + golang.org/x/crypto v0.11.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.13.0 // indirect; indirect // indirect golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/term v0.9.0 // indirect - golang.org/x/text v0.10.0 // indirect + golang.org/x/term v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.10.0 // indirect + golang.org/x/tools v0.11.0 // indirect gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 08fe9603d..ff32c864e 100644 --- a/go.sum +++ b/go.sum @@ -72,6 +72,7 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= @@ -83,6 +84,7 @@ github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2o github.com/alecthomas/kong v0.8.0 h1:ryDCzutfIqJPnNn0omnrgHLbAggDQM2VWHikE1xqK7s= github.com/alecthomas/kong v0.8.0/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U= github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= @@ -121,14 +123,14 @@ github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bufbuild/buf v1.22.0 
h1:dCWUIx1gm3nm5U+FKdkVjaL+Rk9Ev3hh4XYMa2Cbn/o= -github.com/bufbuild/buf v1.22.0/go.mod h1:ERFRzJiIjAOzUSJ3vz1zoI7XfxlBnCwZEyL+NJm4pko= -github.com/bufbuild/connect-go v1.8.0 h1:srluNkFkZBfSfg9Qb6DrO+5nMaxix//h2ctrHZhMGKc= -github.com/bufbuild/connect-go v1.8.0/go.mod h1:GmMJYR6orFqD0Y6ZgX8pwQ8j9baizDrIQMm1/a6LnHk= -github.com/bufbuild/connect-opentelemetry-go v0.3.0 h1:AuZi3asTDKmjGtd2aqpyP4p5QvBFG/YEaHopViLatnk= -github.com/bufbuild/connect-opentelemetry-go v0.3.0/go.mod h1:r1ppyTtu1EWeRodk4Q/JbyQhIWtO7eR3GoRDzjeEcNU= -github.com/bufbuild/protocompile v0.5.1 h1:mixz5lJX4Hiz4FpqFREJHIXLfaLBntfaJv1h+/jS+Qg= -github.com/bufbuild/protocompile v0.5.1/go.mod h1:G5iLmavmF4NsYtpZFvE3B/zFch2GIY8+wjsYLR/lc40= +github.com/bufbuild/buf v1.25.1 h1:8ed5AjZ+zPIJf72rxtfsDit/MtaBimaSRn9Y+5G++y0= +github.com/bufbuild/buf v1.25.1/go.mod h1:UMPncXMWgrmIM+0QpwTEwjNr2SA0z2YIVZZsmNflvB4= +github.com/bufbuild/connect-go v1.9.0 h1:JIgAeNuFpo+SUPfU19Yt5TcWlznsN5Bv10/gI/6Pjoc= +github.com/bufbuild/connect-go v1.9.0/go.mod h1:CAIePUgkDR5pAFaylSMtNK45ANQjp9JvpluG20rhpV8= +github.com/bufbuild/connect-opentelemetry-go v0.4.0 h1:6JAn10SNqlQ/URhvRNGrIlczKw1wEXknBUUtmWqOiak= +github.com/bufbuild/connect-opentelemetry-go v0.4.0/go.mod h1:nwPXYoDOoc2DGyKE/6pT1Q9MPSi2Et2e6BieMD0l6WU= +github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= +github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= @@ -148,12 +150,13 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/crossplane/crossplane-runtime v0.20.0-rc.0.0.20230622044456-2dfb8bc6bfbc h1:GGAiSNI47xNpWdO09+E0bZtpJIZMof9U8ZvTymq9S+c= -github.com/crossplane/crossplane-runtime v0.20.0-rc.0.0.20230622044456-2dfb8bc6bfbc/go.mod h1:XYUuhLfBtc7Nl1nHguPQQf6D4Xcm3idyIJ8+roe8Od4= +github.com/crossplane/crossplane-runtime v0.20.1 h1:xEYNL65wq3IA4NloSknH/n7F/GVKQd3QDpNWB4dFRks= +github.com/crossplane/crossplane-runtime v0.20.1/go.mod h1:FuKIC8Mg8hE2gIAMyf2wCPkxkFPz+VnMQiYWBq1/p5A= github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= @@ -171,14 +174,15 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/docker/cli v24.0.2+incompatible h1:QdqR7znue1mtkXIJ+ruQMGQhpw2JzMJLRXp6zpzF6tM= -github.com/docker/cli v24.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v24.0.4+incompatible h1:Y3bYF9ekNTm2VFz5U/0BlMdJy73D+Y1iAAZ8l63Ydzw= +github.com/docker/cli v24.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= -github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= +github.com/docker/docker v24.0.4+incompatible h1:s/LVDftw9hjblvqIeTiGYXBCD95nOEEl7qRsRrIOuQI= +github.com/docker/docker v24.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= +github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= +github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -206,8 +210,8 @@ github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= -github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk= +github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -289,8 +293,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE= -github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q= +github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= 
+github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230617045147-2472cbbbf289 h1:wk0QZFyD9RapJgFdQGb8+5+RtNxJsrVYpdEHfTc3Q8g= github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230617045147-2472cbbbf289/go.mod h1:Ek+8PQrShkA7aHEj3/zSW33wU0V/Bx3zW/gFh7l21xY= github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa h1:+MG+Q2Q7mtW6kCIbUPZ9ZMrj7xOWDKI1hhy1qp0ygI0= @@ -313,9 +317,10 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs= -github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= +github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA= +github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -359,6 +364,7 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jdxcode/netrc v0.0.0-20221124155335-4616370d1a84 h1:2uT3aivO7NVpUPGcQX7RbHijHMyWix/yCnIrCWc+5co= @@ -379,8 +385,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk= -github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod 
h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -401,6 +407,7 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -433,10 +440,11 @@ github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= -github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= +github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runtime-spec v1.1.0-rc.3.0.20230610073135-48415de180cf h1:AGnwZS8lmjGxN2/XlzORiYESAk7HOlE3XI37uhIP9Vw= github.com/opencontainers/runtime-spec v1.1.0-rc.3.0.20230610073135-48415de180cf/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -473,6 +481,7 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -492,8 +501,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/tetratelabs/wazero v1.2.1 h1:J4X2hrGzJvt+wqltuvcSjHQ7ujQxA9gb6PeMs4qlUWs= -github.com/tetratelabs/wazero v1.2.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= 
+github.com/tetratelabs/wazero v1.3.1 h1:rnb9FgOEQRLLR8tgoD1mfjNjMhFeWRUk+a4b4j/GpUM= +github.com/tetratelabs/wazero v1.3.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= @@ -546,8 +555,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -585,8 +594,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -625,8 +634,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= +golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -695,6 +704,7 @@ golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -707,14 +717,14 @@ golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -726,8 +736,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -787,8 +797,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= -golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= -golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= +golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= +golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -860,8 +870,8 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -881,8 +891,8 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= -google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -898,8 +908,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 
h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -957,12 +967,14 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= -sigs.k8s.io/controller-tools v0.12.0 h1:TY6CGE6+6hzO7hhJFte65ud3cFmmZW947jajXkuDfBw= -sigs.k8s.io/controller-tools v0.12.0/go.mod h1:rXlpTfFHZMpZA8aGq9ejArgZiieHd+fkk/fTatY8A2M= +sigs.k8s.io/controller-tools v0.12.1 h1:GyQqxzH5wksa4n3YDIJdJJOopztR5VDM+7qsyg5yE4U= +sigs.k8s.io/controller-tools v0.12.1/go.mod h1:rXlpTfFHZMpZA8aGq9ejArgZiieHd+fkk/fTatY8A2M= sigs.k8s.io/e2e-framework v0.2.0 h1:gD6AWWAHFcHibI69E9TgkNFhh0mVwWtRCHy2RU057jQ= sigs.k8s.io/e2e-framework v0.2.0/go.mod h1:E6JXj/V4PIlb95jsn2WrNKG+Shb45xaaI7C0+BH4PL8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kind v0.20.0 h1:f0sc3v9mQbGnjBUaqSFST1dwIuiikKVGgoTwpoP33a8= +sigs.k8s.io/kind v0.20.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/internal/client/clientset/versioned/typed/pkg/v1alpha1/fake/fake_function.go b/internal/client/clientset/versioned/typed/pkg/v1alpha1/fake/fake_function.go new file mode 100644 index 000000000..460074ec8 --- /dev/null +++ b/internal/client/clientset/versioned/typed/pkg/v1alpha1/fake/fake_function.go @@ -0,0 +1,132 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/crossplane/crossplane/apis/pkg/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeFunctions implements FunctionInterface +type FakeFunctions struct { + Fake *FakePkgV1alpha1 +} + +var functionsResource = v1alpha1.SchemeGroupVersion.WithResource("functions") + +var functionsKind = v1alpha1.SchemeGroupVersion.WithKind("Function") + +// Get takes name of the function, and returns the corresponding function object, and an error if there is any. +func (c *FakeFunctions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Function, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(functionsResource, name), &v1alpha1.Function{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Function), err +} + +// List takes label and field selectors, and returns the list of Functions that match those selectors. +func (c *FakeFunctions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FunctionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(functionsResource, functionsKind, opts), &v1alpha1.FunctionList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.FunctionList{ListMeta: obj.(*v1alpha1.FunctionList).ListMeta} + for _, item := range obj.(*v1alpha1.FunctionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested functions. +func (c *FakeFunctions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(functionsResource, opts)) +} + +// Create takes the representation of a function and creates it. Returns the server's representation of the function, and an error, if there is any. +func (c *FakeFunctions) Create(ctx context.Context, function *v1alpha1.Function, opts v1.CreateOptions) (result *v1alpha1.Function, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(functionsResource, function), &v1alpha1.Function{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Function), err +} + +// Update takes the representation of a function and updates it. Returns the server's representation of the function, and an error, if there is any. +func (c *FakeFunctions) Update(ctx context.Context, function *v1alpha1.Function, opts v1.UpdateOptions) (result *v1alpha1.Function, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(functionsResource, function), &v1alpha1.Function{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Function), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeFunctions) UpdateStatus(ctx context.Context, function *v1alpha1.Function, opts v1.UpdateOptions) (*v1alpha1.Function, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateSubresourceAction(functionsResource, "status", function), &v1alpha1.Function{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Function), err +} + +// Delete takes name of the function and deletes it. Returns an error if one occurs. +func (c *FakeFunctions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(functionsResource, name, opts), &v1alpha1.Function{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeFunctions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(functionsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.FunctionList{}) + return err +} + +// Patch applies the patch and returns the patched function. +func (c *FakeFunctions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Function, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(functionsResource, name, pt, data, subresources...), &v1alpha1.Function{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Function), err +} diff --git a/internal/client/clientset/versioned/typed/pkg/v1alpha1/fake/fake_functionrevision.go b/internal/client/clientset/versioned/typed/pkg/v1alpha1/fake/fake_functionrevision.go new file mode 100644 index 000000000..24b681eac --- /dev/null +++ b/internal/client/clientset/versioned/typed/pkg/v1alpha1/fake/fake_functionrevision.go @@ -0,0 +1,132 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/crossplane/crossplane/apis/pkg/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeFunctionRevisions implements FunctionRevisionInterface +type FakeFunctionRevisions struct { + Fake *FakePkgV1alpha1 +} + +var functionrevisionsResource = v1alpha1.SchemeGroupVersion.WithResource("functionrevisions") + +var functionrevisionsKind = v1alpha1.SchemeGroupVersion.WithKind("FunctionRevision") + +// Get takes name of the functionRevision, and returns the corresponding functionRevision object, and an error if there is any. +func (c *FakeFunctionRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FunctionRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(functionrevisionsResource, name), &v1alpha1.FunctionRevision{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FunctionRevision), err +} + +// List takes label and field selectors, and returns the list of FunctionRevisions that match those selectors. 
+func (c *FakeFunctionRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FunctionRevisionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(functionrevisionsResource, functionrevisionsKind, opts), &v1alpha1.FunctionRevisionList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.FunctionRevisionList{ListMeta: obj.(*v1alpha1.FunctionRevisionList).ListMeta} + for _, item := range obj.(*v1alpha1.FunctionRevisionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested functionRevisions. +func (c *FakeFunctionRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(functionrevisionsResource, opts)) +} + +// Create takes the representation of a functionRevision and creates it. Returns the server's representation of the functionRevision, and an error, if there is any. +func (c *FakeFunctionRevisions) Create(ctx context.Context, functionRevision *v1alpha1.FunctionRevision, opts v1.CreateOptions) (result *v1alpha1.FunctionRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(functionrevisionsResource, functionRevision), &v1alpha1.FunctionRevision{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FunctionRevision), err +} + +// Update takes the representation of a functionRevision and updates it. Returns the server's representation of the functionRevision, and an error, if there is any. +func (c *FakeFunctionRevisions) Update(ctx context.Context, functionRevision *v1alpha1.FunctionRevision, opts v1.UpdateOptions) (result *v1alpha1.FunctionRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(functionrevisionsResource, functionRevision), &v1alpha1.FunctionRevision{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FunctionRevision), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeFunctionRevisions) UpdateStatus(ctx context.Context, functionRevision *v1alpha1.FunctionRevision, opts v1.UpdateOptions) (*v1alpha1.FunctionRevision, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(functionrevisionsResource, "status", functionRevision), &v1alpha1.FunctionRevision{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FunctionRevision), err +} + +// Delete takes name of the functionRevision and deletes it. Returns an error if one occurs. +func (c *FakeFunctionRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(functionrevisionsResource, name, opts), &v1alpha1.FunctionRevision{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeFunctionRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(functionrevisionsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.FunctionRevisionList{}) + return err +} + +// Patch applies the patch and returns the patched functionRevision. 
+func (c *FakeFunctionRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FunctionRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(functionrevisionsResource, name, pt, data, subresources...), &v1alpha1.FunctionRevision{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FunctionRevision), err +} diff --git a/internal/client/clientset/versioned/typed/pkg/v1alpha1/fake/fake_pkg_client.go b/internal/client/clientset/versioned/typed/pkg/v1alpha1/fake/fake_pkg_client.go index ffc844254..29cbd0121 100644 --- a/internal/client/clientset/versioned/typed/pkg/v1alpha1/fake/fake_pkg_client.go +++ b/internal/client/clientset/versioned/typed/pkg/v1alpha1/fake/fake_pkg_client.go @@ -32,6 +32,14 @@ func (c *FakePkgV1alpha1) ControllerConfigs() v1alpha1.ControllerConfigInterface return &FakeControllerConfigs{c} } +func (c *FakePkgV1alpha1) Functions() v1alpha1.FunctionInterface { + return &FakeFunctions{c} +} + +func (c *FakePkgV1alpha1) FunctionRevisions() v1alpha1.FunctionRevisionInterface { + return &FakeFunctionRevisions{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakePkgV1alpha1) RESTClient() rest.Interface { diff --git a/internal/client/clientset/versioned/typed/pkg/v1alpha1/function.go b/internal/client/clientset/versioned/typed/pkg/v1alpha1/function.go new file mode 100644 index 000000000..7b16c6e36 --- /dev/null +++ b/internal/client/clientset/versioned/typed/pkg/v1alpha1/function.go @@ -0,0 +1,184 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/crossplane/crossplane/apis/pkg/v1alpha1" + scheme "github.com/crossplane/crossplane/internal/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// FunctionsGetter has a method to return a FunctionInterface. +// A group's client should implement this interface. +type FunctionsGetter interface { + Functions() FunctionInterface +} + +// FunctionInterface has methods to work with Function resources. 
+type FunctionInterface interface { + Create(ctx context.Context, function *v1alpha1.Function, opts v1.CreateOptions) (*v1alpha1.Function, error) + Update(ctx context.Context, function *v1alpha1.Function, opts v1.UpdateOptions) (*v1alpha1.Function, error) + UpdateStatus(ctx context.Context, function *v1alpha1.Function, opts v1.UpdateOptions) (*v1alpha1.Function, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Function, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FunctionList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Function, err error) + FunctionExpansion +} + +// functions implements FunctionInterface +type functions struct { + client rest.Interface +} + +// newFunctions returns a Functions +func newFunctions(c *PkgV1alpha1Client) *functions { + return &functions{ + client: c.RESTClient(), + } +} + +// Get takes name of the function, and returns the corresponding function object, and an error if there is any. +func (c *functions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Function, err error) { + result = &v1alpha1.Function{} + err = c.client.Get(). + Resource("functions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Functions that match those selectors. +func (c *functions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FunctionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.FunctionList{} + err = c.client.Get(). + Resource("functions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested functions. +func (c *functions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("functions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a function and creates it. Returns the server's representation of the function, and an error, if there is any. +func (c *functions) Create(ctx context.Context, function *v1alpha1.Function, opts v1.CreateOptions) (result *v1alpha1.Function, err error) { + result = &v1alpha1.Function{} + err = c.client.Post(). + Resource("functions"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(function). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a function and updates it. Returns the server's representation of the function, and an error, if there is any. +func (c *functions) Update(ctx context.Context, function *v1alpha1.Function, opts v1.UpdateOptions) (result *v1alpha1.Function, err error) { + result = &v1alpha1.Function{} + err = c.client.Put(). + Resource("functions"). + Name(function.Name). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(function). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *functions) UpdateStatus(ctx context.Context, function *v1alpha1.Function, opts v1.UpdateOptions) (result *v1alpha1.Function, err error) { + result = &v1alpha1.Function{} + err = c.client.Put(). + Resource("functions"). + Name(function.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(function). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the function and deletes it. Returns an error if one occurs. +func (c *functions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("functions"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *functions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("functions"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched function. +func (c *functions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Function, err error) { + result = &v1alpha1.Function{} + err = c.client.Patch(pt). + Resource("functions"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/internal/client/clientset/versioned/typed/pkg/v1alpha1/functionrevision.go b/internal/client/clientset/versioned/typed/pkg/v1alpha1/functionrevision.go new file mode 100644 index 000000000..263ee91a7 --- /dev/null +++ b/internal/client/clientset/versioned/typed/pkg/v1alpha1/functionrevision.go @@ -0,0 +1,184 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/crossplane/crossplane/apis/pkg/v1alpha1" + scheme "github.com/crossplane/crossplane/internal/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// FunctionRevisionsGetter has a method to return a FunctionRevisionInterface. +// A group's client should implement this interface. +type FunctionRevisionsGetter interface { + FunctionRevisions() FunctionRevisionInterface +} + +// FunctionRevisionInterface has methods to work with FunctionRevision resources. 
+type FunctionRevisionInterface interface { + Create(ctx context.Context, functionRevision *v1alpha1.FunctionRevision, opts v1.CreateOptions) (*v1alpha1.FunctionRevision, error) + Update(ctx context.Context, functionRevision *v1alpha1.FunctionRevision, opts v1.UpdateOptions) (*v1alpha1.FunctionRevision, error) + UpdateStatus(ctx context.Context, functionRevision *v1alpha1.FunctionRevision, opts v1.UpdateOptions) (*v1alpha1.FunctionRevision, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.FunctionRevision, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FunctionRevisionList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FunctionRevision, err error) + FunctionRevisionExpansion +} + +// functionRevisions implements FunctionRevisionInterface +type functionRevisions struct { + client rest.Interface +} + +// newFunctionRevisions returns a FunctionRevisions +func newFunctionRevisions(c *PkgV1alpha1Client) *functionRevisions { + return &functionRevisions{ + client: c.RESTClient(), + } +} + +// Get takes name of the functionRevision, and returns the corresponding functionRevision object, and an error if there is any. +func (c *functionRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FunctionRevision, err error) { + result = &v1alpha1.FunctionRevision{} + err = c.client.Get(). + Resource("functionrevisions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of FunctionRevisions that match those selectors. +func (c *functionRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FunctionRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.FunctionRevisionList{} + err = c.client.Get(). + Resource("functionrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested functionRevisions. +func (c *functionRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("functionrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a functionRevision and creates it. Returns the server's representation of the functionRevision, and an error, if there is any. +func (c *functionRevisions) Create(ctx context.Context, functionRevision *v1alpha1.FunctionRevision, opts v1.CreateOptions) (result *v1alpha1.FunctionRevision, err error) { + result = &v1alpha1.FunctionRevision{} + err = c.client.Post(). + Resource("functionrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(functionRevision). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a functionRevision and updates it. 
Returns the server's representation of the functionRevision, and an error, if there is any. +func (c *functionRevisions) Update(ctx context.Context, functionRevision *v1alpha1.FunctionRevision, opts v1.UpdateOptions) (result *v1alpha1.FunctionRevision, err error) { + result = &v1alpha1.FunctionRevision{} + err = c.client.Put(). + Resource("functionrevisions"). + Name(functionRevision.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(functionRevision). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *functionRevisions) UpdateStatus(ctx context.Context, functionRevision *v1alpha1.FunctionRevision, opts v1.UpdateOptions) (result *v1alpha1.FunctionRevision, err error) { + result = &v1alpha1.FunctionRevision{} + err = c.client.Put(). + Resource("functionrevisions"). + Name(functionRevision.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(functionRevision). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the functionRevision and deletes it. Returns an error if one occurs. +func (c *functionRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("functionrevisions"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *functionRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("functionrevisions"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched functionRevision. +func (c *functionRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FunctionRevision, err error) { + result = &v1alpha1.FunctionRevision{} + err = c.client.Patch(pt). + Resource("functionrevisions"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/internal/client/clientset/versioned/typed/pkg/v1alpha1/generated_expansion.go b/internal/client/clientset/versioned/typed/pkg/v1alpha1/generated_expansion.go index f6fcd2b95..7ffe4b2d4 100644 --- a/internal/client/clientset/versioned/typed/pkg/v1alpha1/generated_expansion.go +++ b/internal/client/clientset/versioned/typed/pkg/v1alpha1/generated_expansion.go @@ -19,3 +19,7 @@ limitations under the License. 
package v1alpha1 type ControllerConfigExpansion interface{} + +type FunctionExpansion interface{} + +type FunctionRevisionExpansion interface{} diff --git a/internal/client/clientset/versioned/typed/pkg/v1alpha1/pkg_client.go b/internal/client/clientset/versioned/typed/pkg/v1alpha1/pkg_client.go index 8679b8b28..f48314a0f 100644 --- a/internal/client/clientset/versioned/typed/pkg/v1alpha1/pkg_client.go +++ b/internal/client/clientset/versioned/typed/pkg/v1alpha1/pkg_client.go @@ -29,6 +29,8 @@ import ( type PkgV1alpha1Interface interface { RESTClient() rest.Interface ControllerConfigsGetter + FunctionsGetter + FunctionRevisionsGetter } // PkgV1alpha1Client is used to interact with features provided by the pkg.crossplane.io group. @@ -40,6 +42,14 @@ func (c *PkgV1alpha1Client) ControllerConfigs() ControllerConfigInterface { return newControllerConfigs(c) } +func (c *PkgV1alpha1Client) Functions() FunctionInterface { + return newFunctions(c) +} + +func (c *PkgV1alpha1Client) FunctionRevisions() FunctionRevisionInterface { + return newFunctionRevisions(c) +} + // NewForConfig creates a new PkgV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/internal/controller/apiextensions/claim/configurator.go b/internal/controller/apiextensions/claim/configurator.go index 4ed4807fb..5f36b530f 100644 --- a/internal/controller/apiextensions/claim/configurator.go +++ b/internal/controller/apiextensions/claim/configurator.go @@ -121,9 +121,10 @@ func (c *APIDryRunCompositeConfigurator) Configure(ctx context.Context, cm resou // CompositionRevision is a special field which needs to be propagated // based on the Update policy. If the policy is `Manual`, we need to - // overwrite the composite's value with the claim's + // remove CompositionRevisionRef from wellKnownClaimFields, so it + // does not get filtered out and is set correctly in composite if cp.GetCompositionUpdatePolicy() != nil && *cp.GetCompositionUpdatePolicy() == xpv1.UpdateManual { - cp.SetCompositionRevisionReference(cm.GetCompositionRevisionReference()) + delete(wellKnownClaimFields, xcrd.CompositionRevisionRef) } claimSpecFilter := xcrd.GetPropFields(wellKnownClaimFields) diff --git a/internal/controller/apiextensions/claim/configurator_test.go b/internal/controller/apiextensions/claim/configurator_test.go index 526f3f267..976f91eaf 100644 --- a/internal/controller/apiextensions/claim/configurator_test.go +++ b/internal/controller/apiextensions/claim/configurator_test.go @@ -372,6 +372,222 @@ func TestCompositeConfigure(t *testing.T) { }, }, }, + "UpdatePolicyAutomatic": { + reason: "CompositionRevision of composite should NOT be set by the claim", + args: args{ + cm: &claim.Unstructured{ + Unstructured: unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": apiVersion, + "kind": kind, + "metadata": map[string]any{ + "namespace": ns, + "name": name, + }, + "spec": map[string]any{ + "resourceRef": "ref", + "writeConnectionSecretToRef": "ref", + "compositionUpdatePolicy": "Automatic", + "compositionRevisionRef": map[string]any{ + "name": "newref", + }, + }, + "status": map[string]any{ + "previousCoolness": 23, + "conditions": []map[string]any{ + { + "type": "someCondition", + }, + }, + }, + }, + }, + }, + cp: &composite.Unstructured{ + Unstructured: unstructured.Unstructured{ + Object: map[string]any{ + "metadata": map[string]any{ + "namespace": ns, + "name": name + "-12345", + "creationTimestamp": 
func() string { + b, _ := now.MarshalJSON() + return strings.Trim(string(b), "\"") + }(), + "labels": map[string]any{ + xcrd.LabelKeyClaimNamespace: ns, + xcrd.LabelKeyClaimName: name, + }, + }, + "spec": map[string]any{ + "compositionUpdatePolicy": "Automatic", + "claimRef": map[string]any{ + "apiVersion": apiVersion, + "kind": kind, + "namespace": ns, + "name": name, + }, + }, + "status": map[string]any{ + "previousCoolness": 28, + "conditions": []map[string]any{ + { + "type": "otherCondition", + }, + }, + }, + }, + }, + }, + }, + want: want{ + cp: &composite.Unstructured{ + Unstructured: unstructured.Unstructured{ + Object: map[string]any{ + "metadata": map[string]any{ + "namespace": ns, + "name": name + "-12345", + "creationTimestamp": func() string { + b, _ := now.MarshalJSON() + return strings.Trim(string(b), "\"") + }(), + "labels": map[string]any{ + xcrd.LabelKeyClaimNamespace: ns, + xcrd.LabelKeyClaimName: name, + }, + }, + "spec": map[string]any{ + "compositionUpdatePolicy": "Automatic", + "claimRef": map[string]any{ + "apiVersion": apiVersion, + "kind": kind, + "namespace": ns, + "name": name, + }, + }, + "status": map[string]any{ + "previousCoolness": 28, + "conditions": []map[string]any{ + { + "type": "otherCondition", + }, + }, + }, + }, + }, + }, + }, + }, + "UpdatePolicyManual": { + reason: "CompositionRevision of composite should be overwritten by the claim", + args: args{ + cm: &claim.Unstructured{ + Unstructured: unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": apiVersion, + "kind": kind, + "metadata": map[string]any{ + "namespace": ns, + "name": name, + }, + "spec": map[string]any{ + "resourceRef": "ref", + "writeConnectionSecretToRef": "ref", + "compositionUpdatePolicy": "Manual", + "compositionRevisionRef": map[string]any{ + "name": "newref", + }, + }, + "status": map[string]any{ + "previousCoolness": 23, + "conditions": []map[string]any{ + { + "type": "someCondition", + }, + }, + }, + }, + }, + }, + cp: &composite.Unstructured{ + Unstructured: unstructured.Unstructured{ + Object: map[string]any{ + "metadata": map[string]any{ + "namespace": ns, + "name": name + "-12345", + "creationTimestamp": func() string { + b, _ := now.MarshalJSON() + return strings.Trim(string(b), "\"") + }(), + "labels": map[string]any{ + xcrd.LabelKeyClaimNamespace: ns, + xcrd.LabelKeyClaimName: name, + }, + }, + "spec": map[string]any{ + "compositionUpdatePolicy": "Manual", + "compositionRevisionRef": map[string]any{ + "name": "oldref", + }, + "claimRef": map[string]any{ + "apiVersion": apiVersion, + "kind": kind, + "namespace": ns, + "name": name, + }, + }, + "status": map[string]any{ + "previousCoolness": 28, + "conditions": []map[string]any{ + { + "type": "otherCondition", + }, + }, + }, + }, + }, + }, + }, + want: want{ + cp: &composite.Unstructured{ + Unstructured: unstructured.Unstructured{ + Object: map[string]any{ + "metadata": map[string]any{ + "namespace": ns, + "name": name + "-12345", + "creationTimestamp": func() string { + b, _ := now.MarshalJSON() + return strings.Trim(string(b), "\"") + }(), + "labels": map[string]any{ + xcrd.LabelKeyClaimNamespace: ns, + xcrd.LabelKeyClaimName: name, + }, + }, + "spec": map[string]any{ + "compositionUpdatePolicy": "Manual", + "compositionRevisionRef": map[string]any{ + "name": "newref", + }, + "claimRef": map[string]any{ + "apiVersion": apiVersion, + "kind": kind, + "namespace": ns, + "name": name, + }, + }, + "status": map[string]any{ + "previousCoolness": 28, + "conditions": []map[string]any{ + { + "type": 
"otherCondition", + }, + }, + }, + }, + }, + }, + }, + }, } for name, tc := range cases { @@ -403,7 +619,6 @@ func TestClaimConfigure(t *testing.T) { type want struct { cm resource.CompositeClaim err error - cp resource.Composite } cases := map[string]struct { @@ -727,7 +942,7 @@ func TestClaimConfigure(t *testing.T) { }, }, "UpdatePolicyManual": { - reason: "CompositionRevision of composite should be overwritten by the claim", + reason: "CompositionRevision of claim should NOT overwritten by the composite", args: args{ client: test.NewMockClient(), cm: &claim.Unstructured{ @@ -810,32 +1025,6 @@ func TestClaimConfigure(t *testing.T) { }, }, }, - cp: &composite.Unstructured{ - Unstructured: unstructured.Unstructured{ - Object: map[string]any{ - "metadata": map[string]any{ - "namespace": ns, - "name": name + "-12345", - }, - "spec": map[string]any{ - "resourceRefs": "ref", - "claimRef": "ref", - "compositionUpdatePolicy": "Manual", - "compositionRevisionRef": map[string]any{ - "name": "oldref", - }, - }, - "status": map[string]any{ - "previousCoolness": 28, - "conditions": []map[string]any{ - { - "type": "otherCondition", - }, - }, - }, - }, - }, - }, }, }, "UpdatePolicyAutomatic": { diff --git a/internal/controller/apiextensions/claim/reconciler.go b/internal/controller/apiextensions/claim/reconciler.go index 2bd01d049..ca549764c 100644 --- a/internal/controller/apiextensions/claim/reconciler.go +++ b/internal/controller/apiextensions/claim/reconciler.go @@ -25,6 +25,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -520,17 +521,21 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, cm), errUpdateClaimStatus) } - if err := r.client.Apply(ctx, cp); err != nil { + err := r.client.Apply(ctx, cp, resource.AllowUpdateIf(func(old, obj runtime.Object) bool { return !cmp.Equal(old, obj) })) + switch { + case resource.IsNotAllowed(err): + log.Debug("Skipped no-op composite resource apply") + case err != nil: log.Debug(errApplyComposite, "error", err) err = errors.Wrap(err, errApplyComposite) record.Event(cm, event.Warning(reasonCompositeConfigure, err)) cm.SetConditions(xpv1.ReconcileError(err)) return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, cm), errUpdateClaimStatus) + default: + log.Debug("Successfully applied composite resource") + record.Event(cm, event.Normal(reasonCompositeConfigure, "Successfully applied composite resource")) } - log.Debug("Successfully applied composite resource") - record.Event(cm, event.Normal(reasonCompositeConfigure, "Successfully applied composite resource")) - if err := r.claim.Configure(ctx, cm, cp); err != nil { log.Debug(errConfigureClaim, "error", err) err = errors.Wrap(err, errConfigureClaim) diff --git a/internal/controller/apiextensions/composite/composition_ptf.go b/internal/controller/apiextensions/composite/composition_ptf.go index f01ac8dd9..4dded326b 100644 --- a/internal/controller/apiextensions/composite/composition_ptf.go +++ b/internal/controller/apiextensions/composite/composition_ptf.go @@ -649,7 +649,7 @@ type ContainerFunctionRunnerOption func(ctx context.Context, fn *v1.ContainerFun // 2. Loads credentials from the supplied service account's image pull secrets. 
// 3. Loads credentials from the function's image pull secrets. // 4. Loads credentials using the GKE, EKS, or AKS credentials helper. -func WithKubernetesAuthentication(c client.Reader, namespace, serviceAccount string) ContainerFunctionRunnerOption { +func WithKubernetesAuthentication(c client.Reader, namespace, serviceAccount, registry string) ContainerFunctionRunnerOption { return func(ctx context.Context, fn *v1.ContainerFunction, r *fnv1alpha1.RunFunctionRequest) error { sa := &corev1.ServiceAccount{} @@ -677,7 +677,7 @@ func WithKubernetesAuthentication(c client.Reader, namespace, serviceAccount str return errors.Wrap(err, errNewKeychain) } - ref, err := name.ParseReference(fn.Image) + ref, err := name.ParseReference(fn.Image, name.WithDefaultRegistry(registry)) if err != nil { return errors.Wrap(err, errParseImage) } diff --git a/internal/controller/apiextensions/composite/composition_ptf_test.go b/internal/controller/apiextensions/composite/composition_ptf_test.go index 0a71b3a42..fa928a38d 100644 --- a/internal/controller/apiextensions/composite/composition_ptf_test.go +++ b/internal/controller/apiextensions/composite/composition_ptf_test.go @@ -1117,6 +1117,7 @@ func TestWithKubernetesAuthentication(t *testing.T) { c client.Reader namespace string serviceAccount string + registry string } type args struct { ctx context.Context @@ -1188,6 +1189,7 @@ func TestWithKubernetesAuthentication(t *testing.T) { return nil }, }, + registry: "index.docker.io", }, args: args{ fn: &v1.ContainerFunction{ @@ -1211,7 +1213,7 @@ func TestWithKubernetesAuthentication(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - err := WithKubernetesAuthentication(tc.params.c, tc.params.namespace, tc.params.serviceAccount)(tc.args.ctx, tc.args.fn, tc.args.r) + err := WithKubernetesAuthentication(tc.params.c, tc.params.namespace, tc.params.serviceAccount, tc.params.registry)(tc.args.ctx, tc.args.fn, tc.args.r) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("\n%s\nWithKubernetesAuthentication(...): -want error, +got error:\n%s", tc.reason, diff) diff --git a/internal/controller/apiextensions/composite/composition_transforms.go b/internal/controller/apiextensions/composite/composition_transforms.go index 0b411e469..66c66f71c 100644 --- a/internal/controller/apiextensions/composite/composition_transforms.go +++ b/internal/controller/apiextensions/composite/composition_transforms.go @@ -24,7 +24,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "reflect" "regexp" "strconv" "strings" @@ -194,7 +193,7 @@ func ResolveMap(t v1.MapTransform, input any) (any, error) { } return val, nil default: - return nil, errors.Errorf(errFmtMapTypeNotSupported, reflect.TypeOf(input).String()) + return nil, errors.Errorf(errFmtMapTypeNotSupported, fmt.Sprintf("%T", input)) } } @@ -247,7 +246,7 @@ func matchesLiteral(p v1.MatchTransformPattern, input any) (bool, error) { } inputStr, ok := input.(string) if !ok { - return false, errors.Errorf(errFmtMatchInputTypeInvalid, reflect.TypeOf(input).String()) + return false, errors.Errorf(errFmtMatchInputTypeInvalid, fmt.Sprintf("%T", input)) } return inputStr == *p.Literal, nil } @@ -265,7 +264,7 @@ func matchesRegexp(p v1.MatchTransformPattern, input any) (bool, error) { } inputStr, ok := input.(string) if !ok { - return false, errors.Errorf(errFmtMatchInputTypeInvalid, reflect.TypeOf(input).String()) + return false, errors.Errorf(errFmtMatchInputTypeInvalid, fmt.Sprintf("%T", input)) } return re.MatchString(inputStr), nil 
} @@ -380,7 +379,7 @@ func ResolveConvert(t v1.ConvertTransform, input any) (any, error) { return nil, err } - from := v1.TransformIOType(reflect.TypeOf(input).String()) + from := v1.TransformIOType(fmt.Sprintf("%T", input)) if !from.IsValid() { return nil, errors.Errorf(errFmtConvertInputTypeNotSupported, input) } diff --git a/internal/controller/apiextensions/composite/environment/fetcher.go b/internal/controller/apiextensions/composite/environment/fetcher.go index fa112b244..b36b62d3a 100644 --- a/internal/controller/apiextensions/composite/environment/fetcher.go +++ b/internal/controller/apiextensions/composite/environment/fetcher.go @@ -50,7 +50,7 @@ func NewNilEnvironmentFetcher() *NilEnvironmentFetcher { type NilEnvironmentFetcher struct{} // Fetch always returns nil. -func (f *NilEnvironmentFetcher) Fetch(_ context.Context, _ resource.Composite) (*Environment, error) { +func (f *NilEnvironmentFetcher) Fetch(_ context.Context, _ resource.Composite, _ bool) (*Environment, error) { return nil, nil } @@ -77,7 +77,7 @@ type APIEnvironmentFetcher struct { // // Note: The `.Data` path is trimmed from the result so its necessary to include // it in patches. -func (f *APIEnvironmentFetcher) Fetch(ctx context.Context, cr resource.Composite) (*Environment, error) { +func (f *APIEnvironmentFetcher) Fetch(ctx context.Context, cr resource.Composite, required bool) (*Environment, error) { var env *Environment // Return an empty environment if the XR references no EnvironmentConfigs. @@ -89,7 +89,7 @@ func (f *APIEnvironmentFetcher) Fetch(ctx context.Context, cr resource.Composite } } else { var err error - env, err = f.fetchEnvironment(ctx, cr) + env, err = f.fetchEnvironment(ctx, cr, required) if err != nil { return nil, err } @@ -104,7 +104,7 @@ func (f *APIEnvironmentFetcher) Fetch(ctx context.Context, cr resource.Composite return env, nil } -func (f *APIEnvironmentFetcher) fetchEnvironment(ctx context.Context, cr resource.Composite) (*Environment, error) { +func (f *APIEnvironmentFetcher) fetchEnvironment(ctx context.Context, cr resource.Composite, required bool) (*Environment, error) { refs := cr.GetEnvironmentConfigReferences() loadedConfigs := []v1alpha1.EnvironmentConfig{} for _, ref := range refs { @@ -112,8 +112,13 @@ func (f *APIEnvironmentFetcher) fetchEnvironment(ctx context.Context, cr resourc nn := types.NamespacedName{ Name: ref.Name, } - if err := f.kube.Get(ctx, nn, &config); err != nil { - return nil, errors.Wrap(err, errGetEnvironmentConfig) + err := f.kube.Get(ctx, nn, &config) + if err != nil { + // skip if resolution policy is optional + if required { + return nil, errors.Wrap(err, errGetEnvironmentConfig) + } + continue } loadedConfigs = append(loadedConfigs, config) } diff --git a/internal/controller/apiextensions/composite/environment/fetcher_test.go b/internal/controller/apiextensions/composite/environment/fetcher_test.go index 0dbc8e245..caf267d47 100644 --- a/internal/controller/apiextensions/composite/environment/fetcher_test.go +++ b/internal/controller/apiextensions/composite/environment/fetcher_test.go @@ -26,6 +26,7 @@ import ( extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane/crossplane-runtime/pkg/resource/fake" @@ -124,8 +125,9 @@ func TestFetch(t *testing.T) { } type args struct { - kube client.Client - cr *fake.Composite + kube client.Client + cr *fake.Composite 
+ required *bool } type want struct { env *Environment @@ -213,12 +215,33 @@ func TestFetch(t *testing.T) { err: errors.Wrapf(errBoom, errGetEnvironmentConfig), }, }, + "NoErrorOnKubeGetErrorIfResolutionNotRequired": { + reason: "It should omit EnvironmentConfig if getting a EnvironmentConfig from a reference fails", + args: args{ + kube: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), + }, + cr: composite( + withEnvironmentRefs( + corev1.ObjectReference{Name: "a"}, + ), + ), + required: pointer.Bool(false), + }, + want: want{ + env: makeEnvironment(map[string]interface{}{}), + }, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { f := NewAPIEnvironmentFetcher(tc.args.kube) - got, err := f.Fetch(context.Background(), tc.args.cr) + required := true + if tc.args.required != nil { + required = *tc.args.required + } + got, err := f.Fetch(context.Background(), tc.args.cr, required) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("\n%s\nr.Reconcile(...): -want error, +got error:\n%s", tc.reason, diff) diff --git a/internal/controller/apiextensions/composite/environment/selector.go b/internal/controller/apiextensions/composite/environment/selector.go index 8401d7b53..b78bb8f3c 100644 --- a/internal/controller/apiextensions/composite/environment/selector.go +++ b/internal/controller/apiextensions/composite/environment/selector.go @@ -19,6 +19,8 @@ package environment import ( "context" "fmt" + "reflect" + "sort" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -29,18 +31,20 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" - v1alpha1 "github.com/crossplane/crossplane/apis/apiextensions/v1alpha1" + "github.com/crossplane/crossplane/apis/apiextensions/v1alpha1" ) const ( errFmtReferenceEnvironmentConfig = "failed to build reference at index %d" errFmtResolveLabelValue = "failed to resolve value for label at index %d" errListEnvironmentConfigs = "failed to list environments" - errListEnvironmentConfigsNoResult = "no EnvironmentConfig found that matches labels" errFmtInvalidEnvironmentSourceType = "invalid source type '%s'" - - errFmtInvalidLabelMatcherType = "invalid label matcher type '%s'" - errFmtRequiredField = "%s is required by type %s" + errFmtInvalidLabelMatcherType = "invalid label matcher type '%s'" + errFmtRequiredField = "%s is required by type %s" + errFmtUnknownSelectorMode = "unknown mode '%s'" + errFmtSortNotMatchingTypes = "not matching types, got %[1]v (%[1]T), expected %[2]v" + errFmtSortUnknownType = "unexpected type %T" + errFmtFoundMultipleInSingleMode = "only 1 EnvironmentConfig can be selected in Single mode, found: %d" ) // NewNoopEnvironmentSelector creates a new NoopEnvironmentSelector. @@ -71,25 +75,28 @@ type APIEnvironmentSelector struct { // SelectEnvironment for cr using the configuration defined in comp. // The computed list of EnvironmentConfig references will be stored in cr. 
func (s *APIEnvironmentSelector) SelectEnvironment(ctx context.Context, cr resource.Composite, rev *v1.CompositionRevision) error { - // noop if EnvironmentConfig references are already computed - if len(cr.GetEnvironmentConfigReferences()) > 0 { - return nil - } - if rev.Spec.Environment == nil || rev.Spec.Environment.EnvironmentConfigs == nil { + if !rev.Spec.Environment.ShouldResolve(cr.GetEnvironmentConfigReferences()) { return nil } - refs := make([]corev1.ObjectReference, len(rev.Spec.Environment.EnvironmentConfigs)) + refs := make([]corev1.ObjectReference, 0, len(rev.Spec.Environment.EnvironmentConfigs)) for i, src := range rev.Spec.Environment.EnvironmentConfigs { switch src.Type { case v1.EnvironmentSourceTypeReference: - refs[i] = s.buildEnvironmentConfigRefFromRef(src.Ref) + refs = append( + refs, + s.buildEnvironmentConfigRefFromRef(src.Ref), + ) case v1.EnvironmentSourceTypeSelector: - r, err := s.buildEnvironmentConfigRefFromSelector(ctx, cr, src.Selector) + ec, err := s.lookUpConfigs(ctx, cr, src.Selector.MatchLabels) if err != nil { return errors.Wrapf(err, errFmtReferenceEnvironmentConfig, i) } - refs[i] = r + r, err := s.buildEnvironmentConfigRefFromSelector(ec, src.Selector) + if err != nil { + return errors.Wrapf(err, errFmtReferenceEnvironmentConfig, i) + } + refs = append(refs, r...) default: return errors.Errorf(errFmtInvalidEnvironmentSourceType, string(src.Type)) } @@ -106,28 +113,132 @@ func (s *APIEnvironmentSelector) buildEnvironmentConfigRefFromRef(ref *v1.Enviro } } -func (s *APIEnvironmentSelector) buildEnvironmentConfigRefFromSelector(ctx context.Context, cr resource.Composite, selector *v1.EnvironmentSourceSelector) (corev1.ObjectReference, error) { - matchLabels := make(client.MatchingLabels, len(selector.MatchLabels)) - for i, m := range selector.MatchLabels { +func (s *APIEnvironmentSelector) lookUpConfigs(ctx context.Context, cr resource.Composite, ml []v1.EnvironmentSourceSelectorLabelMatcher) (*v1alpha1.EnvironmentConfigList, error) { + matchLabels := make(client.MatchingLabels, len(ml)) + for i, m := range ml { val, err := ResolveLabelValue(m, cr) if err != nil { - return corev1.ObjectReference{}, errors.Wrapf(err, errFmtResolveLabelValue, i) + return nil, errors.Wrapf(err, errFmtResolveLabelValue, i) } matchLabels[m.Key] = val } res := &v1alpha1.EnvironmentConfigList{} if err := s.kube.List(ctx, res, matchLabels); err != nil { - return corev1.ObjectReference{}, errors.Wrap(err, errListEnvironmentConfigs) + return nil, errors.Wrap(err, errListEnvironmentConfigs) } - if len(res.Items) == 0 { - return corev1.ObjectReference{}, errors.New(errListEnvironmentConfigsNoResult) + return res, nil +} + +func (s *APIEnvironmentSelector) buildEnvironmentConfigRefFromSelector(cl *v1alpha1.EnvironmentConfigList, selector *v1.EnvironmentSourceSelector) ([]corev1.ObjectReference, error) { + ec := make([]v1alpha1.EnvironmentConfig, 0) + + if len(cl.Items) == 0 { + return []corev1.ObjectReference{}, nil } - envConfig := res.Items[0] - return corev1.ObjectReference{ - Name: envConfig.Name, - Kind: v1alpha1.EnvironmentConfigKind, - APIVersion: v1alpha1.SchemeGroupVersion.String(), - }, nil + + switch selector.Mode { + case v1.EnvironmentSourceSelectorSingleMode: + switch len(cl.Items) { + case 1: + ec = append(ec, cl.Items[0]) + default: + return nil, errors.Errorf(errFmtFoundMultipleInSingleMode, len(cl.Items)) + } + case v1.EnvironmentSourceSelectorMultiMode: + err := sortConfigs(cl.Items, selector.SortByFieldPath) + if err != nil { + return nil, err + } + + if 
selector.MaxMatch != nil { + ec = append(ec, cl.Items[:*selector.MaxMatch]...) + break + } + ec = append(ec, cl.Items...) + + default: + // should never happen + return nil, errors.Errorf(errFmtUnknownSelectorMode, selector.Mode) + } + + envConfigs := make([]corev1.ObjectReference, len(ec)) + for i, v := range ec { + envConfigs[i] = corev1.ObjectReference{ + Name: v.Name, + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + } + } + + return envConfigs, nil +} + +func sortConfigs(ec []v1alpha1.EnvironmentConfig, f string) error { //nolint:gocyclo // TODO(phisco): refactor + p := make([]struct { + ec v1alpha1.EnvironmentConfig + val any + }, len(ec)) + + var valsKind reflect.Kind + for i := 0; i < len(ec); i++ { + m, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&ec[i]) + if err != nil { + return err + } + + val, err := fieldpath.Pave(m).GetValue(f) + if err != nil { + return err + } + + var vt reflect.Kind + if val != nil { + vt = reflect.TypeOf(val).Kind() + } + + // check only vt1 as vt1 == vt2 + switch vt { //nolint:exhaustive // we only support these types + case reflect.String, reflect.Int64, reflect.Float64: + // ok + default: + return errors.Errorf(errFmtSortUnknownType, val) + } + + if i == 0 { + valsKind = vt + } else if vt != valsKind { + // compare with previous values' kind + return errors.Errorf(errFmtSortNotMatchingTypes, val, valsKind) + } + + p[i].ec = ec[i] + p[i].val = val + } + + var err error + sort.Slice(p, func(i, j int) bool { + vali, valj := p[i].val, p[j].val + switch valsKind { //nolint:exhaustive // we only support these types + case reflect.Float64: + return vali.(float64) < valj.(float64) + case reflect.Int64: + return vali.(int64) < valj.(int64) + case reflect.String: + return vali.(string) < valj.(string) + default: + // should never happen + err = errors.Errorf(errFmtSortUnknownType, valsKind) + return false + } + }) + if err != nil { + return err + } + + for i := 0; i < len(ec); i++ { + ec[i] = p[i].ec + } + return nil } // ResolveLabelValue from a EnvironmentSourceSelectorLabelMatcher and an Object. 
diff --git a/internal/controller/apiextensions/composite/environment/selector_test.go b/internal/controller/apiextensions/composite/environment/selector_test.go index 207614ce4..24cf4af65 100644 --- a/internal/controller/apiextensions/composite/environment/selector_test.go +++ b/internal/controller/apiextensions/composite/environment/selector_test.go @@ -17,11 +17,15 @@ package environment import ( "context" + "encoding/json" + "fmt" + "reflect" "testing" "github.com/google/go-cmp/cmp" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" @@ -75,6 +79,18 @@ func TestSelect(t *testing.T) { } } + makeJSON := func(m map[string]interface{}) map[string]extv1.JSON { + raw, err := json.Marshal(m) + if err != nil { + t.Fatal(err) + } + res := map[string]extv1.JSON{} + if err := json.Unmarshal(raw, &res); err != nil { + t.Fatal(err) + } + return res + } + cases := map[string]struct { reason string args args @@ -145,8 +161,8 @@ func TestSelect(t *testing.T) { ), }, }, - "RefForLabelSelectedObject": { - reason: "It should create a name reference for the first selected EnvironmentConfig that matches the labels.", + "RefForLabelSelectedObjects": { + reason: "It should create a name reference for selected EnvironmentConfigs that match the labels.", args: args{ kube: &test.MockClient{ MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { @@ -154,7 +170,18 @@ func TestSelect(t *testing.T) { list.Items = []v1alpha1.EnvironmentConfig{ { ObjectMeta: metav1.ObjectMeta{ - Name: "test", + Name: "test-1", + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-2", + Labels: map[string]string{ + "foo": "bar", + }, }, }, } @@ -163,7 +190,6 @@ func TestSelect(t *testing.T) { }, cr: composite( withName("test-composite"), - withEnvironmentRefs(environmentConfigRef("test")), ), rev: &v1.CompositionRevision{ Spec: v1.CompositionRevisionSpec{ @@ -172,6 +198,8 @@ func TestSelect(t *testing.T) { { Type: v1.EnvironmentSourceTypeSelector, Selector: &v1.EnvironmentSourceSelector{ + Mode: v1.EnvironmentSourceSelectorMultiMode, + SortByFieldPath: "metadata.name", MatchLabels: []v1.EnvironmentSourceSelectorLabelMatcher{ { Type: v1.EnvironmentSourceSelectorLabelMatcherTypeValue, @@ -189,7 +217,7 @@ func TestSelect(t *testing.T) { want: want{ cr: composite( withName("test-composite"), - withEnvironmentRefs(environmentConfigRef("test")), + withEnvironmentRefs(environmentConfigRef("test-1"), environmentConfigRef("test-2")), ), }, }, @@ -223,6 +251,7 @@ func TestSelect(t *testing.T) { { Type: v1.EnvironmentSourceTypeSelector, Selector: &v1.EnvironmentSourceSelector{ + Mode: v1.EnvironmentSourceSelectorSingleMode, MatchLabels: []v1.EnvironmentSourceSelectorLabelMatcher{ { Type: v1.EnvironmentSourceSelectorLabelMatcherTypeFromCompositeFieldPath, @@ -245,7 +274,7 @@ func TestSelect(t *testing.T) { }, }, "RefForFirstLabelSelectedObject": { - reason: "It should create a name reference for the first selected EnvironmentConfig that matches the labels.", + reason: "It should create a name reference for the single selected EnvironmentConfig that matches the labels.", args: args{ kube: &test.MockClient{ MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { @@ -253,21 +282,64 @@ func TestSelect(t *testing.T) { list.Items = []v1alpha1.EnvironmentConfig{ { ObjectMeta: 
metav1.ObjectMeta{ - Name: "test", + Name: "test-1", + }, + }, + } + return nil + }), + }, + cr: composite(), + rev: &v1.CompositionRevision{ + Spec: v1.CompositionRevisionSpec{ + Environment: &v1.EnvironmentConfiguration{ + EnvironmentConfigs: []v1.EnvironmentSource{ + { + Type: v1.EnvironmentSourceTypeSelector, + Selector: &v1.EnvironmentSourceSelector{ + Mode: v1.EnvironmentSourceSelectorSingleMode, + MatchLabels: []v1.EnvironmentSourceSelectorLabelMatcher{ + { + Type: v1.EnvironmentSourceSelectorLabelMatcherTypeValue, + Key: "foo", + Value: pointer.String("bar"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: want{ + cr: composite( + withEnvironmentRefs(environmentConfigRef("test-1")), + ), + }, + }, + "ErrorOnMultipleObjectsInSingleMode": { + reason: "It should return an error if more than 1 EnvironmentConfigs match the labels.", + args: args{ + kube: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + list := obj.(*v1alpha1.EnvironmentConfigList) + list.Items = []v1alpha1.EnvironmentConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", }, }, { ObjectMeta: metav1.ObjectMeta{ - Name: "not-this-one", + Name: "test-not-this-one", }, }, } return nil }), }, - cr: composite( - withEnvironmentRefs(environmentConfigRef("test")), - ), + cr: composite(), rev: &v1.CompositionRevision{ Spec: v1.CompositionRevisionSpec{ Environment: &v1.EnvironmentConfiguration{ @@ -275,8 +347,10 @@ func TestSelect(t *testing.T) { { Type: v1.EnvironmentSourceTypeSelector, Selector: &v1.EnvironmentSourceSelector{ + Mode: v1.EnvironmentSourceSelectorSingleMode, MatchLabels: []v1.EnvironmentSourceSelectorLabelMatcher{ { + Type: v1.EnvironmentSourceSelectorLabelMatcherTypeValue, Key: "foo", Value: pointer.String("bar"), }, @@ -289,9 +363,8 @@ func TestSelect(t *testing.T) { }, }, want: want{ - cr: composite( - withEnvironmentRefs(environmentConfigRef("test")), - ), + cr: composite(), + err: errors.Wrap(fmt.Errorf(errFmtFoundMultipleInSingleMode, 2), "failed to build reference at index 0"), }, }, "RefsInOrder": { @@ -368,8 +441,8 @@ func TestSelect(t *testing.T) { err: errors.Wrapf(errors.Wrap(errBoom, errListEnvironmentConfigs), errFmtReferenceEnvironmentConfig, 0), }, }, - "ErrorOnKubeListEmpty": { - reason: "It should return an error if kube.List returns an empty list.", + "NoReferenceOnKubeListEmpty": { + reason: "It should return an empty list of references if kube.List returns an empty list.", args: args{ kube: &test.MockClient{ MockList: test.NewMockListFn(nil), @@ -398,9 +471,8 @@ func TestSelect(t *testing.T) { }, want: want{ cr: composite( - withEnvironmentRefs(), + withEnvironmentRefs([]corev1.ObjectReference{}...), ), - err: errors.Wrapf(errors.New(errListEnvironmentConfigsNoResult), errFmtReferenceEnvironmentConfig, 0), }, }, "ErrorOnInvalidLabelValueFieldPath": { @@ -438,6 +510,555 @@ func TestSelect(t *testing.T) { err: errors.Wrapf(errors.Wrapf(errors.New("wrong: no such field"), errFmtResolveLabelValue, 0), errFmtReferenceEnvironmentConfig, 0), }, }, + "AllRefsSortedInMultiMode": { + reason: "It should return complete list of references sorted by metadata.name", + args: args{ + kube: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + list := obj.(*v1alpha1.EnvironmentConfigList) + list.Items = []v1alpha1.EnvironmentConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-2", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-4", + 
}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-3", + }, + }, + } + return nil + }), + }, + cr: composite(), + rev: &v1.CompositionRevision{ + Spec: v1.CompositionRevisionSpec{ + Environment: &v1.EnvironmentConfiguration{ + EnvironmentConfigs: []v1.EnvironmentSource{ + { + Type: v1.EnvironmentSourceTypeSelector, + Selector: &v1.EnvironmentSourceSelector{ + Mode: v1.EnvironmentSourceSelectorMultiMode, + SortByFieldPath: "metadata.name", + MatchLabels: []v1.EnvironmentSourceSelectorLabelMatcher{ + { + Type: v1.EnvironmentSourceSelectorLabelMatcherTypeValue, + Key: "foo", + Value: pointer.String("bar"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: want{ + cr: composite( + withEnvironmentRefs([]corev1.ObjectReference{ + { + Name: "test-1", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + { + + Name: "test-2", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + { + + Name: "test-3", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + { + Name: "test-4", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + }...), + ), + }, + }, + "MaxMatchRefsSortedInMultiMode": { + reason: "It should return limited list of references sorted by specified annotation", + args: args{ + kube: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + list := obj.(*v1alpha1.EnvironmentConfigList) + list.Items = []v1alpha1.EnvironmentConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-2", + Annotations: map[string]string{ + "sort.by/weight": "2", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Annotations: map[string]string{ + "sort.by/weight": "1", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-4", + Annotations: map[string]string{ + "sort.by/weight": "4", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-3", + Annotations: map[string]string{ + "sort.by/weight": "3", + }, + }, + }, + } + return nil + }), + }, + cr: composite(), + rev: &v1.CompositionRevision{ + Spec: v1.CompositionRevisionSpec{ + Environment: &v1.EnvironmentConfiguration{ + EnvironmentConfigs: []v1.EnvironmentSource{ + { + Type: v1.EnvironmentSourceTypeSelector, + Selector: &v1.EnvironmentSourceSelector{ + Mode: v1.EnvironmentSourceSelectorMultiMode, + SortByFieldPath: "metadata.annotations[sort.by/weight]", + MatchLabels: []v1.EnvironmentSourceSelectorLabelMatcher{ + { + Type: v1.EnvironmentSourceSelectorLabelMatcherTypeValue, + Key: "foo", + Value: pointer.String("bar"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: want{ + cr: composite( + withEnvironmentRefs([]corev1.ObjectReference{ + { + Name: "test-1", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + { + Name: "test-2", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + { + Name: "test-3", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + { + Name: "test-4", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + }...), + ), + }, + }, + "MaxMatchRefsSortedByFloatInMultiMode": { + reason: "It should return limited list of references sorted by float values", + args: args{ + kube: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + list := 
obj.(*v1alpha1.EnvironmentConfigList) + list.Items = []v1alpha1.EnvironmentConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-2", + }, + Data: makeJSON( + map[string]interface{}{ + "float/weight": float64(1.2), + }, + ), + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + Data: makeJSON( + map[string]interface{}{ + "float/weight": float64(1.1), + }, + ), + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-4", + }, + Data: makeJSON( + map[string]interface{}{ + "float/weight": float64(1.4), + }, + ), + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-3", + }, + Data: makeJSON( + map[string]interface{}{ + "float/weight": float64(1.3), + }, + ), + }, + } + return nil + }), + }, + cr: composite(), + rev: &v1.CompositionRevision{ + Spec: v1.CompositionRevisionSpec{ + Environment: &v1.EnvironmentConfiguration{ + EnvironmentConfigs: []v1.EnvironmentSource{ + { + Type: v1.EnvironmentSourceTypeSelector, + Selector: &v1.EnvironmentSourceSelector{ + Mode: v1.EnvironmentSourceSelectorMultiMode, + SortByFieldPath: "data[float/weight]", + MatchLabels: []v1.EnvironmentSourceSelectorLabelMatcher{ + { + Type: v1.EnvironmentSourceSelectorLabelMatcherTypeValue, + Key: "foo", + Value: pointer.String("bar"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: want{ + cr: composite( + withEnvironmentRefs([]corev1.ObjectReference{ + { + Name: "test-1", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + { + Name: "test-2", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + { + Name: "test-3", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + { + Name: "test-4", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + }...), + ), + }, + }, + "MaxMatchRefsSortedByIntInMultiMode": { + reason: "It should return limited list of references sorted by int values", + args: args{ + kube: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + list := obj.(*v1alpha1.EnvironmentConfigList) + list.Items = []v1alpha1.EnvironmentConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-2", + }, + Data: makeJSON( + map[string]interface{}{ + "int/weight": int64(2), + }, + ), + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + Data: makeJSON( + map[string]interface{}{ + "int/weight": int64(1), + }, + ), + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-3", + }, + Data: makeJSON( + map[string]interface{}{ + "int/weight": int64(3), + }, + ), + }, + } + return nil + }), + }, + cr: composite(), + rev: &v1.CompositionRevision{ + Spec: v1.CompositionRevisionSpec{ + Environment: &v1.EnvironmentConfiguration{ + EnvironmentConfigs: []v1.EnvironmentSource{ + { + Type: v1.EnvironmentSourceTypeSelector, + Selector: &v1.EnvironmentSourceSelector{ + Mode: v1.EnvironmentSourceSelectorMultiMode, + MaxMatch: pointer.Uint64(3), + SortByFieldPath: "data[int/weight]", + MatchLabels: []v1.EnvironmentSourceSelectorLabelMatcher{ + { + Type: v1.EnvironmentSourceSelectorLabelMatcherTypeValue, + Key: "foo", + Value: pointer.String("bar"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: want{ + cr: composite( + withEnvironmentRefs([]corev1.ObjectReference{ + { + Name: "test-1", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + { + + Name: "test-2", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: 
v1alpha1.SchemeGroupVersion.String(), + }, + { + + Name: "test-3", + Kind: v1alpha1.EnvironmentConfigKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + }...), + ), + }, + }, + "ErrSelectOnNotMatchingType": { + reason: "It should return when types of copared values dont match", + args: args{ + kube: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + list := obj.(*v1alpha1.EnvironmentConfigList) + list.Items = []v1alpha1.EnvironmentConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-2", + }, + Data: makeJSON( + map[string]interface{}{ + "int/weight": float64(2.1), + }, + ), + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + Data: makeJSON( + map[string]interface{}{ + "int/weight": int64(1), + }, + ), + }, + } + return nil + }), + }, + cr: composite(), + rev: &v1.CompositionRevision{ + Spec: v1.CompositionRevisionSpec{ + Environment: &v1.EnvironmentConfiguration{ + EnvironmentConfigs: []v1.EnvironmentSource{ + { + Type: v1.EnvironmentSourceTypeSelector, + Selector: &v1.EnvironmentSourceSelector{ + Mode: v1.EnvironmentSourceSelectorMultiMode, + SortByFieldPath: "data[int/weight]", + MaxMatch: pointer.Uint64(3), + MatchLabels: []v1.EnvironmentSourceSelectorLabelMatcher{ + { + Type: v1.EnvironmentSourceSelectorLabelMatcherTypeValue, + Key: "foo", + Value: pointer.String("bar"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: want{ + cr: composite( + withEnvironmentRefs(), + ), + err: errors.Wrap(fmt.Errorf(errFmtSortNotMatchingTypes, int64(1), reflect.Float64), "failed to build reference at index 0"), + }, + }, + "ErrSelectOnUnexpectedType": { + reason: "It should return error when compared values have unexpected types", + args: args{ + kube: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + list := obj.(*v1alpha1.EnvironmentConfigList) + list.Items = []v1alpha1.EnvironmentConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-2", + }, + Data: makeJSON( + map[string]interface{}{ + "int/weight": true, + }, + ), + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + Data: makeJSON( + map[string]interface{}{ + "int/weight": true, + }, + ), + }, + } + return nil + }), + }, + cr: composite(), + rev: &v1.CompositionRevision{ + Spec: v1.CompositionRevisionSpec{ + Environment: &v1.EnvironmentConfiguration{ + EnvironmentConfigs: []v1.EnvironmentSource{ + { + Type: v1.EnvironmentSourceTypeSelector, + Selector: &v1.EnvironmentSourceSelector{ + Mode: v1.EnvironmentSourceSelectorMultiMode, + SortByFieldPath: "data[int/weight]", + MaxMatch: pointer.Uint64(3), + MatchLabels: []v1.EnvironmentSourceSelectorLabelMatcher{ + { + Type: v1.EnvironmentSourceSelectorLabelMatcherTypeValue, + Key: "foo", + Value: pointer.String("bar"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: want{ + cr: composite( + withEnvironmentRefs(), + ), + err: errors.Wrap(fmt.Errorf("unexpected type bool"), "failed to build reference at index 0"), + }, + }, + "ErrSelectOnInvalidFieldPath": { + reason: "It should return error on invalid field path", + args: args{ + kube: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + list := obj.(*v1alpha1.EnvironmentConfigList) + list.Items = []v1alpha1.EnvironmentConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-2", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + }, + } + return nil + }), + }, + cr: composite(), + rev: &v1.CompositionRevision{ + Spec: v1.CompositionRevisionSpec{ + 
Environment: &v1.EnvironmentConfiguration{ + EnvironmentConfigs: []v1.EnvironmentSource{ + { + Type: v1.EnvironmentSourceTypeSelector, + Selector: &v1.EnvironmentSourceSelector{ + Mode: v1.EnvironmentSourceSelectorMultiMode, + SortByFieldPath: "metadata.annotations[int/weight]", + MaxMatch: pointer.Uint64(3), + MatchLabels: []v1.EnvironmentSourceSelectorLabelMatcher{ + { + Type: v1.EnvironmentSourceSelectorLabelMatcherTypeValue, + Key: "foo", + Value: pointer.String("bar"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: want{ + cr: composite( + withEnvironmentRefs(), + ), + err: errors.Wrap(fmt.Errorf("metadata.annotations: no such field"), "failed to build reference at index 0"), + }, + }, } for name, tc := range cases { diff --git a/internal/controller/apiextensions/composite/fuzz_test.go b/internal/controller/apiextensions/composite/fuzz_test.go index b5cc2fe27..62b1c2c47 100644 --- a/internal/controller/apiextensions/composite/fuzz_test.go +++ b/internal/controller/apiextensions/composite/fuzz_test.go @@ -21,9 +21,13 @@ import ( "testing" fuzz "github.com/AdaLogics/go-fuzz-headers" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/yaml" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource/fake" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" pkgmetav1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" @@ -145,3 +149,40 @@ func FuzzTransform(f *testing.F) { _, _ = Resolve(*t, i) }) } + +func YamlToUnstructured(yamlStr string) (*unstructured.Unstructured, error) { + obj := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(yamlStr), &obj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: obj}, nil +} + +func FuzzPTFComposer(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte, yamlData string) { + ff := fuzz.NewConsumer(data) + state := &PTFCompositionState{} + unstr, err := YamlToUnstructured(yamlData) + if err != nil { + return + } + if unstr.Object == nil { + return + } + state.Composite = &composite.Unstructured{Unstructured: *unstr} + cd := &managed.ConnectionDetails{} + ff.GenerateStruct(cd) + state.ConnectionDetails = *cd + _, err = FunctionIOObserved(state) + if err != nil { + return + } + + _, err = FunctionIODesired(state) + if err != nil { + return + } + UpdateResourceRefs(state) + }) +} diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index 6ae9d2662..416d4380c 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -150,16 +150,16 @@ func (fn EnvironmentSelectorFn) SelectEnvironment(ctx context.Context, cr resour // An EnvironmentFetcher fetches an appropriate environment for the supplied // composite resource. type EnvironmentFetcher interface { - Fetch(ctx context.Context, cr resource.Composite) (*env.Environment, error) + Fetch(ctx context.Context, cr resource.Composite, required bool) (*env.Environment, error) } // An EnvironmentFetcherFn fetches an appropriate environment for the supplied // composite resource. 
-type EnvironmentFetcherFn func(ctx context.Context, cr resource.Composite) (*env.Environment, error) +type EnvironmentFetcherFn func(ctx context.Context, cr resource.Composite, required bool) (*env.Environment, error) // Fetch an appropriate environment for the supplied Composite resource. -func (fn EnvironmentFetcherFn) Fetch(ctx context.Context, cr resource.Composite) (*env.Environment, error) { - return fn(ctx, cr) +func (fn EnvironmentFetcherFn) Fetch(ctx context.Context, cr resource.Composite, required bool) (*env.Environment, error) { + return fn(ctx, cr, required) } // A Configurator configures a composite resource using its composition. @@ -559,7 +559,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{}, err } - env, err := r.environment.Fetch(ctx, xr) + env, err := r.environment.Fetch(ctx, xr, rev.Spec.Environment.IsRequired()) if err != nil { log.Debug(errFetchEnvironment, "error", err) err = errors.Wrap(err, errFetchEnvironment) diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index ed7cc2dc7..49a0c7341 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ b/internal/controller/apiextensions/composite/reconciler_test.go @@ -381,7 +381,7 @@ func TestReconcile(t *testing.T) { })), WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), WithConfigurator(ConfiguratorFn(func(ctx context.Context, cr resource.Composite, rev *v1.CompositionRevision) error { return nil })), - WithEnvironmentFetcher(EnvironmentFetcherFn(func(ctx context.Context, cr resource.Composite) (*env.Environment, error) { + WithEnvironmentFetcher(EnvironmentFetcherFn(func(ctx context.Context, cr resource.Composite, required bool) (*env.Environment, error) { return nil, errBoom })), WithCompositionUpdatePolicySelector(CompositionUpdatePolicySelectorFn(func(ctx context.Context, cr resource.Composite) error { return nil })), diff --git a/internal/controller/apiextensions/controller/options.go b/internal/controller/apiextensions/controller/options.go index f951da9c0..6f06b4868 100644 --- a/internal/controller/apiextensions/controller/options.go +++ b/internal/controller/apiextensions/controller/options.go @@ -32,4 +32,8 @@ type Options struct { // ServiceAccount for which we'll find image pull secrets for in-cluster // private registry authentication when pulling Composition Functions. 
ServiceAccount string + + // Registry is the default registry to use when pulling containers for + // Composition Functions + Registry string } diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 67078ebea..3a135143f 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -505,7 +505,7 @@ func CompositeReconcilerOptions(co apiextensionscontroller.Options, d *v1.Compos composite.WithCompositeConnectionDetailsFetcher(fetcher), composite.WithFunctionPipelineRunner(composite.NewFunctionPipeline( composite.ContainerFunctionRunnerFn(composite.RunFunction), - composite.WithKubernetesAuthentication(c, co.Namespace, co.ServiceAccount), + composite.WithKubernetesAuthentication(c, co.Namespace, co.ServiceAccount, co.Registry), )), ), composite.NewPTComposer(c, composite.WithComposedConnectionDetailsFetcher(fetcher)), diff --git a/internal/controller/pkg/resolver/reconciler.go b/internal/controller/pkg/resolver/reconciler.go index 44f37fce5..a53fd58d4 100644 --- a/internal/controller/pkg/resolver/reconciler.go +++ b/internal/controller/pkg/resolver/reconciler.go @@ -58,12 +58,12 @@ const ( errRemoveFinalizer = "cannot remove lock finalizer" errBuildDAG = "cannot build DAG" errSortDAG = "cannot sort DAG" - errMissingDependencyFmt = "missing package (%s) is not a dependency" + errFmtMissingDependency = "missing package (%s) is not a dependency" errInvalidConstraint = "version constraint on dependency is invalid" errInvalidDependency = "dependency package is not valid" errFetchTags = "cannot fetch dependency package tags" errNoValidVersion = "cannot find a valid version for package constraints" - errNoValidVersionFmt = "dependency (%s) does not have version in constraints (%s)" + errFmtNoValidVersion = "dependency (%s) does not have version in constraints (%s)" errInvalidPackageType = "cannot create invalid package dependency type" errCreateDependency = "cannot create dependency package" ) @@ -216,7 +216,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // check for missing nodes again. dep, ok := implied[0].(*v1beta1.Dependency) if !ok { - log.Debug(errInvalidDependency, "error", errors.Errorf(errMissingDependencyFmt, dep.Identifier())) + log.Debug(errInvalidDependency, "error", errors.Errorf(errFmtMissingDependency, dep.Identifier())) return reconcile.Result{Requeue: false}, nil } c, err := semver.NewConstraint(dep.Constraints) @@ -260,7 +260,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // NOTE(hasheddan): consider creating event on package revision // dictating constraints. 
if addVer == "" { - log.Debug(errNoValidVersion, "error", errors.Errorf(errNoValidVersionFmt, dep.Identifier(), dep.Constraints)) + log.Debug(errNoValidVersion, "error", errors.Errorf(errFmtNoValidVersion, dep.Identifier(), dep.Constraints)) return reconcile.Result{Requeue: false}, nil } diff --git a/internal/controller/pkg/revision/dependency.go b/internal/controller/pkg/revision/dependency.go index 38bf32470..0a2ed47fc 100644 --- a/internal/controller/pkg/revision/dependency.go +++ b/internal/controller/pkg/revision/dependency.go @@ -41,8 +41,8 @@ const ( errNotMeta = "meta type is not a valid package" errGetOrCreateLock = "cannot get or create lock" errInitDAG = "cannot initialize dependency graph from the packages in the lock" - errIncompatibleDependencyFmt = "incompatible dependencies: %+v" - errMissingDependenciesFmt = "missing dependencies: %+v" + errFmtIncompatibleDependency = "incompatible dependencies: %+v" + errFmtMissingDependencies = "missing dependencies: %+v" errDependencyNotInGraph = "dependency is not present in graph" errDependencyNotLockPackage = "dependency in graph is not a lock package" ) @@ -160,7 +160,7 @@ func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Obje missing = append(missing, dep.Identifier()) } if installed != found { - return found, installed, invalid, errors.Errorf(errMissingDependenciesFmt, missing) + return found, installed, invalid, errors.Errorf(errFmtMissingDependencies, missing) } } @@ -179,7 +179,7 @@ func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Obje } } if len(missing) != 0 { - return found, installed, invalid, errors.Errorf(errMissingDependenciesFmt, missing) + return found, installed, invalid, errors.Errorf(errFmtMissingDependencies, missing) } // All of our dependencies and transitive dependencies must exist. 
Check @@ -208,7 +208,7 @@ func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Obje } invalid = len(invalidDeps) if invalid > 0 { - return found, installed, invalid, errors.Errorf(errIncompatibleDependencyFmt, invalidDeps) + return found, installed, invalid, errors.Errorf(errFmtIncompatibleDependency, invalidDeps) } return found, installed, invalid, nil } diff --git a/internal/controller/pkg/revision/dependency_test.go b/internal/controller/pkg/revision/dependency_test.go index 4543db983..8ad5d3d05 100644 --- a/internal/controller/pkg/revision/dependency_test.go +++ b/internal/controller/pkg/revision/dependency_test.go @@ -353,7 +353,7 @@ func TestResolve(t *testing.T) { }, want: want{ total: 2, - err: errors.Errorf(errMissingDependenciesFmt, []string{"not-here-1", "not-here-2"}), + err: errors.Errorf(errFmtMissingDependencies, []string{"not-here-1", "not-here-2"}), }, }, "ErrorSelfExistMissingDependencies": { @@ -441,7 +441,7 @@ func TestResolve(t *testing.T) { want: want{ total: 3, installed: 1, - err: errors.Errorf(errMissingDependenciesFmt, []string{"not-here-2", "not-here-3"}), + err: errors.Errorf(errFmtMissingDependencies, []string{"not-here-2", "not-here-3"}), }, }, "ErrorSelfExistInvalidDependencies": { @@ -540,7 +540,7 @@ func TestResolve(t *testing.T) { total: 3, installed: 3, invalid: 2, - err: errors.Errorf(errIncompatibleDependencyFmt, []string{"not-here-1", "not-here-2"}), + err: errors.Errorf(errFmtIncompatibleDependency, []string{"not-here-1", "not-here-2"}), }, }, "SuccessfulSelfExistValidDependencies": { diff --git a/internal/controller/pkg/revision/fuzz_test.go b/internal/controller/pkg/revision/fuzz_test.go new file mode 100644 index 000000000..f605419ea --- /dev/null +++ b/internal/controller/pkg/revision/fuzz_test.go @@ -0,0 +1,142 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package revision + +import ( + "bytes" + "context" + "errors" + "io" + "testing" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/parser" + "github.com/crossplane/crossplane-runtime/pkg/test" + + pkgmetav1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" + v1 "github.com/crossplane/crossplane/apis/pkg/v1" + "github.com/crossplane/crossplane/apis/pkg/v1beta1" + "github.com/crossplane/crossplane/internal/dag" + dagfake "github.com/crossplane/crossplane/internal/dag/fake" + "github.com/crossplane/crossplane/internal/xpkg" +) + +var ( + metaScheme *runtime.Scheme + objScheme *runtime.Scheme + linter = xpkg.NewProviderLinter() +) + +func init() { + var err error + metaScheme, err = xpkg.BuildMetaScheme() + if err != nil { + panic(err) + } + objScheme, err = xpkg.BuildObjectScheme() + if err != nil { + panic(err) + } +} + +func newFuzzDag(ff *fuzz.ConsumeFuzzer) (func() dag.DAG, error) { + traceNodeMap := make(map[string]dag.Node) + err := ff.FuzzMap(&traceNodeMap) + if err != nil { + return func() dag.DAG { return nil }, err + } + lp := &v1beta1.LockPackage{} + err = ff.GenerateStruct(lp) + if err != nil { + return func() dag.DAG { return nil }, err + } + return func() dag.DAG { + return &dagfake.MockDag{ + MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + return nil, nil + }, + MockNodeExists: func(identifier string) bool { + return true + }, + MockTraceNode: func(_ string) (map[string]dag.Node, error) { + return traceNodeMap, nil + }, + MockGetNode: func(s string) (dag.Node, error) { + return lp, nil + }, + } + }, nil +} + +func getFuzzMockClient(ff *fuzz.ConsumeFuzzer) (*test.MockClient, error) { + lockPackages := make([]v1beta1.LockPackage, 0) + ff.CreateSlice(&lockPackages) + if len(lockPackages) == 0 { + return nil, errors.New("No packages created") + } + return &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + l := obj.(*v1beta1.Lock) + l.Packages = lockPackages + return nil + }), + MockUpdate: test.NewMockUpdateFn(nil), + }, nil +} + +func FuzzRevisionControllerPackageHandling(f *testing.F) { + f.Fuzz(func(t *testing.T, data, revisionData []byte) { + ff := fuzz.NewConsumer(revisionData) + p := parser.New(metaScheme, objScheme) + r := io.NopCloser(bytes.NewReader(data)) + pkg, err := p.Parse(context.Background(), r) + if err != nil { + return + } + if len(pkg.GetMeta()) == 0 { + return + } + if len(pkg.GetObjects()) == 0 { + return + } + prs := &v1.PackageRevisionSpec{} + ff.GenerateStruct(prs) + pr := &v1.ConfigurationRevision{Spec: *prs} + + if err := linter.Lint(pkg); err != nil { + return + } + pkgMeta, _ := xpkg.TryConvert(pkg.GetMeta()[0], &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}) + c, err := getFuzzMockClient(ff) + if err != nil { + return + } + + fd, err := newFuzzDag(ff) + if err != nil { + return + } + pm := &PackageDependencyManager{ + client: c, + newDag: fd, + } + _, _, _, _ = pm.Resolve(context.Background(), pkgMeta, pr) + }) +} diff --git a/internal/controller/pkg/revision/imageback.go b/internal/controller/pkg/revision/imageback.go index 989e5c100..dea27e366 100644 --- a/internal/controller/pkg/revision/imageback.go +++ b/internal/controller/pkg/revision/imageback.go @@ -23,6 +23,7 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/validate" 
"github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/parser" @@ -39,11 +40,16 @@ const ( errGetUncompressed = "failed to get uncompressed contents from layer" errMultipleAnnotatedLayers = "package is invalid due to multiple annotated base layers" errOpenPackageStream = "failed to open package stream file" + errFmtMaxManifestLayers = "package has %d layers, but only %d are allowed" + errValidateLayer = "invalid package layer" + errValidateImage = "invalid package image" ) const ( layerAnnotation = "io.crossplane.xpkg" baseAnnotationValue = "base" + // maxLayers is the maximum number of layers an image can have. + maxLayers = 256 ) // ImageBackend is a backend for parser. @@ -101,6 +107,12 @@ func (i *ImageBackend) Init(ctx context.Context, bo ...parser.BackendOption) (io if err != nil { return nil, errors.Wrap(err, errGetManifest) } + + // Check that the image has less than the maximum allowed number of layers. + if nLayers := len(manifest.Layers); nLayers > maxLayers { + return nil, errors.Errorf(errFmtMaxManifestLayers, nLayers, maxLayers) + } + // Determine if the image is using annotated layers. var tarc io.ReadCloser foundAnnotated := false @@ -121,6 +133,9 @@ func (i *ImageBackend) Init(ctx context.Context, bo ...parser.BackendOption) (io if err != nil { return nil, errors.Wrap(err, errFetchLayer) } + if err := validate.Layer(layer); err != nil { + return nil, errors.Wrap(err, errValidateLayer) + } tarc, err = layer.Uncompressed() if err != nil { return nil, errors.Wrap(err, errGetUncompressed) @@ -129,6 +144,9 @@ func (i *ImageBackend) Init(ctx context.Context, bo ...parser.BackendOption) (io // If we still don't have content then we need to flatten image filesystem. if !foundAnnotated { + if err := validate.Image(img); err != nil { + return nil, errors.Wrap(err, errValidateImage) + } tarc = mutate.Extract(img) } diff --git a/internal/controller/pkg/revision/imageback_test.go b/internal/controller/pkg/revision/imageback_test.go index add68a80f..d1f6a1617 100644 --- a/internal/controller/pkg/revision/imageback_test.go +++ b/internal/controller/pkg/revision/imageback_test.go @@ -17,18 +17,14 @@ limitations under the License. package revision import ( - "archive/tar" - "bytes" "context" "io" - "strings" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-containerregistry/pkg/v1/empty" "github.com/google/go-containerregistry/pkg/v1/mutate" "github.com/google/go-containerregistry/pkg/v1/random" - "github.com/google/go-containerregistry/pkg/v1/tarball" "github.com/google/go-containerregistry/pkg/v1/types" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -57,23 +53,24 @@ func TestImageBackend(t *testing.T) { }, }) - streamCont := "somestreamofyaml" - tarBuf := new(bytes.Buffer) - tw := tar.NewWriter(tarBuf) - hdr := &tar.Header{ - Name: xpkg.StreamFile, - Mode: int64(xpkg.StreamFileMode), - Size: int64(len(streamCont)), - } - _ = tw.WriteHeader(hdr) - _, _ = io.Copy(tw, strings.NewReader(streamCont)) - _ = tw.Close() - packLayer, _ := tarball.LayerFromOpener(func() (io.ReadCloser, error) { - // NOTE(hasheddan): we must construct a new reader each time as we - // ingest packImg in multiple tests below. 
- return io.NopCloser(bytes.NewReader(tarBuf.Bytes())), nil - }) - packImg, _ := mutate.AppendLayers(empty.Image, packLayer) + // TODO(phisco): uncomment when https://github.com/google/go-containerregistry/pull/1758 is merged + // streamCont := "somestreamofyaml" + // tarBuf := new(bytes.Buffer) + // tw := tar.NewWriter(tarBuf) + // hdr := &tar.Header{ + // Name: xpkg.StreamFile, + // Mode: int64(xpkg.StreamFileMode), + // Size: int64(len(streamCont)), + // } + // _ = tw.WriteHeader(hdr) + // _, _ = io.Copy(tw, strings.NewReader(streamCont)) + // _ = tw.Close() + // packLayer, _ := tarball.LayerFromOpener(func() (io.ReadCloser, error) { + // // NOTE(hasheddan): we must construct a new reader each time as we + // // ingest packImg in multiple tests below. + // return io.NopCloser(bytes.NewReader(tarBuf.Bytes())), nil + // }) + // packImg, _ := mutate.AppendLayers(empty.Image, packLayer) type args struct { f xpkg.Fetcher @@ -152,19 +149,20 @@ func TestImageBackend(t *testing.T) { }, want: errors.Wrap(errBoom, errFetchPackage), }, - "SuccessFetchPackage": { - reason: "Should not return error if package is not in cache but is fetched successfully.", - args: args{ - f: &fake.MockFetcher{ - MockFetch: fake.NewMockFetchFn(packImg, nil), - }, - opts: []parser.BackendOption{PackageRevision(&v1.ProviderRevision{ - Spec: v1.PackageRevisionSpec{ - Package: "test/test:latest", - }, - })}, - }, - }, + // TODO(phisco): uncomment when https://github.com/google/go-containerregistry/pull/1758 is merged + // "SuccessFetchPackage": { + // reason: "Should not return error if package is not in cache but is fetched successfully.", + // args: args{ + // f: &fake.MockFetcher{ + // MockFetch: fake.NewMockFetchFn(packImg, nil), + // }, + // opts: []parser.BackendOption{PackageRevision(&v1.ProviderRevision{ + // Spec: v1.PackageRevisionSpec{ + // Package: "test/test:latest", + // }, + // })}, + // }, + // }, } for name, tc := range cases { diff --git a/internal/controller/pkg/revision/reconciler.go b/internal/controller/pkg/revision/reconciler.go index b8448a533..c8bbcece0 100644 --- a/internal/controller/pkg/revision/reconciler.go +++ b/internal/controller/pkg/revision/reconciler.go @@ -55,6 +55,8 @@ import ( const ( reconcileTimeout = 3 * time.Minute + // the max size of a package parsed by the parser + maxPackageSize = 200 << 20 // 200 MB ) const ( @@ -475,7 +477,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } // Parse package contents. - pkg, err := r.parser.Parse(ctx, rc) + pkg, err := r.parser.Parse(ctx, struct { + io.Reader + io.Closer + }{ + Reader: io.LimitReader(rc, maxPackageSize), + Closer: rc, + }) // Wait until we finish writing to cache. Parser closes the reader. if err := <-cacheWrite; err != nil { // If we failed to cache we want to cleanup, but we don't abort unless diff --git a/internal/controller/rbac/definition/reconciler.go b/internal/controller/rbac/definition/reconciler.go index 86d5faaf6..2a861b8f3 100644 --- a/internal/controller/rbac/definition/reconciler.go +++ b/internal/controller/rbac/definition/reconciler.go @@ -19,6 +19,8 @@ package definition import ( "context" + "fmt" + "sort" "strings" "time" @@ -178,9 +180,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: false}, nil } + applied := make([]string, 0) for _, cr := range r.rbac.RenderClusterRoles(d) { cr := cr // Pin range variable so we can take its address.
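// (Editorial sketch; firstNAndSomeMore is the helper added further down in this
// diff, it is not referenced at this exact point.) The ClusterRole names collected
// in `applied` are emitted as a single Normal event after this loop, truncated so
// the event message stays short, e.g.:
//
//   firstNAndSomeMore([]string{"a", "b", "c"})           // "a, b, c"
//   firstNAndSomeMore([]string{"a", "b", "c", "d", "e"}) // "a, b, c, and 2 more"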
- log = log.WithValues("role-name", cr.GetName()) + log := log.WithValues("role-name", cr.GetName()) err := r.client.Apply(ctx, &cr, resource.MustBeControllableBy(d.GetUID()), resource.AllowUpdateIf(ClusterRolesDiffer)) if resource.IsNotAllowed(err) { log.Debug("Skipped no-op RBAC ClusterRole apply") @@ -193,11 +196,16 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{}, err } log.Debug("Applied RBAC ClusterRole") + applied = append(applied, cr.GetName()) + } + + if len(applied) > 0 { + sort.Strings(applied) + r.record.Event(d, event.Normal(reasonApplyRoles, fmt.Sprintf("Applied RBAC ClusterRoles: %s", firstNAndSomeMore(applied)))) } // TODO(negz): Add a condition that indicates the RBAC manager is managing // cluster roles for this XRD? - r.record.Event(d, event.Normal(reasonApplyRoles, "Applied RBAC ClusterRoles")) // There's no need to requeue explicitly - we're watching all XRDs. return reconcile.Result{Requeue: false}, nil @@ -211,3 +219,10 @@ func ClusterRolesDiffer(current, desired runtime.Object) bool { d := desired.(*rbacv1.ClusterRole) return !cmp.Equal(c.GetLabels(), d.GetLabels()) || !cmp.Equal(c.Rules, d.Rules) } + +func firstNAndSomeMore(names []string) string { + if len(names) > 3 { + return fmt.Sprintf("%s, and %d more", strings.Join(names[:3], ", "), len(names)-3) + } + return strings.Join(names, ", ") +} diff --git a/internal/controller/rbac/namespace/reconciler.go b/internal/controller/rbac/namespace/reconciler.go index c51906638..7b0dcf1c6 100644 --- a/internal/controller/rbac/namespace/reconciler.go +++ b/internal/controller/rbac/namespace/reconciler.go @@ -19,6 +19,8 @@ package namespace import ( "context" + "fmt" + "sort" "strings" "time" @@ -193,8 +195,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{}, err } + var applied []string //nolint:prealloc // We don't know how many roles we'll apply. for _, rl := range r.rbac.RenderRoles(ns, l.Items) { - log = log.WithValues("role-name", rl.GetName()) + log := log.WithValues("role-name", rl.GetName()) rl := rl // Pin range variable so we can take its address. 
err := r.client.Apply(ctx, &rl, resource.MustBeControllableBy(ns.GetUID()), resource.AllowUpdateIf(RolesDiffer)) @@ -210,9 +213,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } log.Debug("Applied RBAC Role") + applied = append(applied, rl.GetName()) + } + + if len(applied) > 0 { + sort.Strings(applied) + r.record.Event(ns, event.Normal(reasonApplyRoles, fmt.Sprintf("Applied RBAC Roles: %s", firstNAndSomeMore(applied)))) } - r.record.Event(ns, event.Normal(reasonApplyRoles, "Applied RBAC Roles")) return reconcile.Result{Requeue: false}, nil } @@ -242,3 +250,10 @@ func equalRolesAnnotations(current, desired *rbacv1.Role) bool { } return cmp.Equal(currentFiltered, desiredFiltered) } + +func firstNAndSomeMore(names []string) string { + if len(names) > 3 { + return fmt.Sprintf("%s, and %d more", strings.Join(names[:3], ", "), len(names)-3) + } + return strings.Join(names, ", ") +} diff --git a/internal/controller/rbac/provider/binding/reconciler.go b/internal/controller/rbac/provider/binding/reconciler.go index 014decf75..5600f8ab9 100644 --- a/internal/controller/rbac/provider/binding/reconciler.go +++ b/internal/controller/rbac/provider/binding/reconciler.go @@ -19,12 +19,15 @@ package binding import ( "context" + "fmt" "strings" "time" + "github.com/google/go-cmp/cmp" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -171,6 +174,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // ProviderRevision. Each revision should control at most one, but it's easy // and relatively harmless for us to handle there being many. subjects := make([]rbacv1.Subject, 0) + subjectStrings := make([]string, 0) for _, sa := range l.Items { for _, ref := range sa.GetOwnerReferences() { if ref.UID == pr.GetUID() { @@ -179,6 +183,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco Namespace: sa.GetNamespace(), Name: sa.GetName(), }) + subjectStrings = append(subjectStrings, sa.GetNamespace()+"/"+sa.GetName()) } } } @@ -204,15 +209,30 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco "subjects", subjects, ) - if err := r.client.Apply(ctx, rb, resource.MustBeControllableBy(pr.GetUID())); err != nil { + err := r.client.Apply(ctx, rb, resource.MustBeControllableBy(pr.GetUID()), resource.AllowUpdateIf(ClusterRoleBindingsDiffer)) + if resource.IsNotAllowed(err) { + log.Debug("Skipped no-op ClusterRoleBinding apply") + return reconcile.Result{}, nil + } + if err != nil { log.Debug(errApplyBinding, "error", err) err = errors.Wrap(err, errApplyBinding) r.record.Event(pr, event.Warning(reasonBind, err)) return reconcile.Result{}, err } + + r.record.Event(pr, event.Normal(reasonBind, fmt.Sprintf("Bound system ClusterRole %q to provider ServiceAccount(s): %s", n, strings.Join(subjectStrings, ", ")))) log.Debug("Applied system ClusterRoleBinding") - r.record.Event(pr, event.Normal(reasonBind, "Bound system ClusterRole to provider ServiceAccount(s)")) // There's no need to requeue explicitly - we're watching all PRs. return reconcile.Result{Requeue: false}, nil } + +// ClusterRoleBindingsDiffer returns true if the supplied objects are different ClusterRoleBindings. 
We +// consider ClusterRoleBindings to be different if the subjects, the roleRefs, or the owner ref +// is different. +func ClusterRoleBindingsDiffer(current, desired runtime.Object) bool { + c := current.(*rbacv1.ClusterRoleBinding) + d := desired.(*rbacv1.ClusterRoleBinding) + return !cmp.Equal(c.Subjects, d.Subjects) || !cmp.Equal(c.RoleRef, d.RoleRef) || !cmp.Equal(c.GetOwnerReferences(), d.GetOwnerReferences()) +} diff --git a/internal/controller/rbac/provider/roles/reconciler.go b/internal/controller/rbac/provider/roles/reconciler.go index 251afd00e..1f0027d39 100644 --- a/internal/controller/rbac/provider/roles/reconciler.go +++ b/internal/controller/rbac/provider/roles/reconciler.go @@ -19,6 +19,8 @@ package roles import ( "context" + "fmt" + "sort" "strings" "time" @@ -322,9 +324,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: false}, nil } + applied := make([]string, 0) for _, cr := range r.rbac.RenderClusterRoles(pr, resources) { cr := cr // Pin range variable so we can take its address. - log = log.WithValues("role-name", cr.GetName()) + log := log.WithValues("role-name", cr.GetName()) err := r.client.Apply(ctx, &cr, resource.MustBeControllableBy(pr.GetUID()), resource.AllowUpdateIf(ClusterRolesDiffer)) if resource.IsNotAllowed(err) { log.Debug("Skipped no-op RBAC ClusterRole apply") @@ -337,11 +340,16 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{}, err } log.Debug("Applied RBAC ClusterRole") + applied = append(applied, cr.GetName()) + } + + if len(applied) > 0 { + sort.Strings(applied) + r.record.Event(pr, event.Normal(reasonApplyRoles, fmt.Sprintf("Applied RBAC ClusterRoles: %s", firstNAndSomeMore(applied)))) } // TODO(negz): Add a condition that indicates the RBAC manager is // managing cluster roles for this ProviderRevision? - r.record.Event(pr, event.Normal(reasonApplyRoles, "Applied RBAC ClusterRoles")) // There's no need to requeue explicitly - we're watching all PRs. return reconcile.Result{Requeue: false}, nil @@ -421,3 +429,10 @@ func (d OrgDiffer) Differs(a, b string) bool { return oa != ob } + +func firstNAndSomeMore(names []string) string { + if len(names) > 3 { + return fmt.Sprintf("%s, and %d more", strings.Join(names[:3], ", "), len(names)-3) + } + return strings.Join(names, ", ") +} diff --git a/internal/controller/rbac/provider/roles/requests.go b/internal/controller/rbac/provider/roles/requests.go index a463ababa..939a901d9 100644 --- a/internal/controller/rbac/provider/roles/requests.go +++ b/internal/controller/rbac/provider/roles/requests.go @@ -29,7 +29,9 @@ import ( // Error strings. const ( - errGetClusterRole = "cannot get ClusterRole" + errGetClusterRole = "cannot get ClusterRole" + errExpandClusterRoleRules = "cannot expand ClusterRole rules" + errExpandPermissionRequests = "cannot expand PermissionRequests" ) const ( @@ -126,11 +128,17 @@ func (r Rule) path() path { } // Expand RBAC policy rules into our granular rules. -func Expand(rs ...rbacv1.PolicyRule) []Rule { +func Expand(ctx context.Context, rs ...rbacv1.PolicyRule) ([]Rule, error) { //nolint:gocyclo // Granular rules are inherently complex. 
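// (Illustrative usage, not part of this change; the timeout value and the caller
// code are assumptions.) Making Expand context-aware lets callers bound how long
// rule expansion may run, for example:
//
//   ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//   defer cancel()
//   rules, err := Expand(ctx, cr.Rules...)
//   if err != nil {
//       // err is ctx.Err(), i.e. context.Canceled or context.DeadlineExceeded.
//   }
//   _ = rules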
out := make([]Rule, 0, len(rs)) for _, r := range rs { for _, u := range r.NonResourceURLs { for _, v := range r.Verbs { + // exit if ctx is done + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } out = append(out, Rule{NonResourceURL: u, Verb: v}) } } @@ -147,13 +155,18 @@ func Expand(rs ...rbacv1.PolicyRule) []Rule { for _, rsc := range r.Resources { for _, n := range names { for _, v := range r.Verbs { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } out = append(out, Rule{APIGroup: g, Resource: rsc, ResourceName: n, Verb: v}) } } } } } - return out + return out, nil } // A ClusterRoleBackedValidator is a PermissionRequestsValidator that validates @@ -170,7 +183,7 @@ func NewClusterRoleBackedValidator(c client.Client, roleName string) *ClusterRol return &ClusterRoleBackedValidator{client: c, name: roleName} } -// ValidatePermissionRequests against the ClusterRole. +// ValidatePermissionRequests against the ClusterRole, returning the list of rejected rules. func (v *ClusterRoleBackedValidator) ValidatePermissionRequests(ctx context.Context, requests ...rbacv1.PolicyRule) ([]Rule, error) { cr := &rbacv1.ClusterRole{} if err := v.client.Get(ctx, types.NamespacedName{Name: v.name}, cr); err != nil { @@ -178,12 +191,20 @@ func (v *ClusterRoleBackedValidator) ValidatePermissionRequests(ctx context.Cont } t := newNode() - for _, rule := range Expand(cr.Rules...) { + expandedCrRules, err := Expand(ctx, cr.Rules...) + if err != nil { + return nil, errors.Wrap(err, errExpandClusterRoleRules) + } + for _, rule := range expandedCrRules { t.Allow(rule.path()) } rejected := make([]Rule, 0) - for _, rule := range Expand(requests...) { + expandedRequests, err := Expand(ctx, requests...) + if err != nil { + return nil, errors.Wrap(err, errExpandPermissionRequests) + } + for _, rule := range expandedRequests { if !t.Allowed(rule.path()) { rejected = append(rejected, rule) } @@ -194,6 +215,6 @@ func (v *ClusterRoleBackedValidator) ValidatePermissionRequests(ctx context.Cont // VerySecureValidator is a PermissionRequestsValidatorFn that rejects all // requested permissions. -func VerySecureValidator(_ context.Context, requests ...rbacv1.PolicyRule) ([]Rule, error) { - return Expand(requests...), nil +func VerySecureValidator(ctx context.Context, requests ...rbacv1.PolicyRule) ([]Rule, error) { + return Expand(ctx, requests...) 
} diff --git a/internal/controller/rbac/provider/roles/requests_test.go b/internal/controller/rbac/provider/roles/requests_test.go index be1535577..d4de22b16 100644 --- a/internal/controller/rbac/provider/roles/requests_test.go +++ b/internal/controller/rbac/provider/roles/requests_test.go @@ -83,79 +83,129 @@ func TestAllowed(t *testing.T) { } func TestExpand(t *testing.T) { + type args struct { + rs []rbacv1.PolicyRule + ctx context.Context + } + type want struct { + err error + rules []Rule + } cases := map[string]struct { reason string - rs []rbacv1.PolicyRule - want []Rule + args + want }{ "SimpleURL": { reason: "It should be possible to expand a simple, granular non-resource RBAC rule.", - rs: []rbacv1.PolicyRule{{ - NonResourceURLs: []string{"/api"}, - Verbs: []string{"get"}, - }}, - want: []Rule{{ - NonResourceURL: "/api", - Verb: "get", - }}, + args: args{ + rs: []rbacv1.PolicyRule{{ + NonResourceURLs: []string{"/api"}, + Verbs: []string{"get"}, + }}, + }, + want: want{ + rules: []Rule{{ + NonResourceURL: "/api", + Verb: "get", + }}, + }, }, "SimpleResource": { reason: "It should be possible to expand a simple, granular resource RBAC rule.", - rs: []rbacv1.PolicyRule{{ - APIGroups: []string{""}, - Resources: []string{"*"}, - Verbs: []string{"get"}, - }}, - want: []Rule{{ - APIGroup: "", - Resource: "*", - ResourceName: "*", - Verb: "get", - }}, + args: args{ + rs: []rbacv1.PolicyRule{{ + APIGroups: []string{""}, + Resources: []string{"*"}, + Verbs: []string{"get"}, + }}, + }, + want: want{ + rules: []Rule{{ + APIGroup: "", + Resource: "*", + ResourceName: "*", + Verb: "get", + }}, + }, }, "ComplexResource": { reason: "It should be possible to expand a more complex resource RBAC rule.", - rs: []rbacv1.PolicyRule{ - {APIGroups: []string{""}, Resources: []string{"*"}, Verbs: []string{"get", "list", "watch"}}, - {APIGroups: []string{"example"}, Resources: []string{"examples", "others"}, ResourceNames: []string{"barry", "hank"}, Verbs: []string{"get"}}, + args: args{ + rs: []rbacv1.PolicyRule{ + {APIGroups: []string{""}, Resources: []string{"*"}, Verbs: []string{"get", "list", "watch"}}, + {APIGroups: []string{"example"}, Resources: []string{"examples", "others"}, ResourceNames: []string{"barry", "hank"}, Verbs: []string{"get"}}, + }, }, - want: []Rule{ - {APIGroup: "", Resource: "*", ResourceName: "*", Verb: "get"}, - {APIGroup: "", Resource: "*", ResourceName: "*", Verb: "list"}, - {APIGroup: "", Resource: "*", ResourceName: "*", Verb: "watch"}, - {APIGroup: "example", Resource: "examples", ResourceName: "barry", Verb: "get"}, - {APIGroup: "example", Resource: "examples", ResourceName: "hank", Verb: "get"}, - {APIGroup: "example", Resource: "others", ResourceName: "barry", Verb: "get"}, - {APIGroup: "example", Resource: "others", ResourceName: "hank", Verb: "get"}, + want: want{ + rules: []Rule{ + {APIGroup: "", Resource: "*", ResourceName: "*", Verb: "get"}, + {APIGroup: "", Resource: "*", ResourceName: "*", Verb: "list"}, + {APIGroup: "", Resource: "*", ResourceName: "*", Verb: "watch"}, + {APIGroup: "example", Resource: "examples", ResourceName: "barry", Verb: "get"}, + {APIGroup: "example", Resource: "examples", ResourceName: "hank", Verb: "get"}, + {APIGroup: "example", Resource: "others", ResourceName: "barry", Verb: "get"}, + {APIGroup: "example", Resource: "others", ResourceName: "hank", Verb: "get"}, + }, }, }, "Combo": { reason: "We should faithfully expand a rule with both URLs and resources. 
This is invalid, but we let Kubernetes police that.", - rs: []rbacv1.PolicyRule{{ - APIGroups: []string{""}, - Resources: []string{"*"}, - NonResourceURLs: []string{"/api"}, - Verbs: []string{"get"}, - }}, - want: []Rule{ - { - NonResourceURL: "/api", - Verb: "get", - }, - { - APIGroup: "", - Resource: "*", - ResourceName: "*", - Verb: "get", + args: args{ + rs: []rbacv1.PolicyRule{{ + APIGroups: []string{""}, + Resources: []string{"*"}, + NonResourceURLs: []string{"/api"}, + Verbs: []string{"get"}, + }}, + }, + want: want{ + rules: []Rule{ + { + NonResourceURL: "/api", + Verb: "get", + }, + { + APIGroup: "", + Resource: "*", + ResourceName: "*", + Verb: "get", + }, }, }, }, + "ComboCtxCancelled": { + reason: "We should return an error if the context is cancelled.", + args: args{ + rs: []rbacv1.PolicyRule{{ + APIGroups: []string{""}, + Resources: []string{"*"}, + NonResourceURLs: []string{"/api"}, + Verbs: []string{"get"}, + }}, + ctx: func() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + return ctx + }(), + }, + want: want{ + err: context.Canceled, + }, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { - got := Expand(tc.rs...) - if diff := cmp.Diff(tc.want, got); diff != "" { + ctx := tc.args.ctx + if ctx == nil { + ctx = context.Background() + } + got, err := Expand(ctx, tc.rs...) + if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nExpand(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.rules, got); diff != "" { t.Errorf("\n%s\nExpand(...): -want, +got:\n%s", tc.reason, diff) } }) @@ -229,6 +279,7 @@ func TestValidatePermissionRequests(t *testing.T) { }, }, args: args{ + ctx: context.Background(), requests: []rbacv1.PolicyRule{ // Allowed - we allow * on secrets. { @@ -270,6 +321,81 @@ func TestValidatePermissionRequests(t *testing.T) { }, }, }, + "SuccessfulRejectEvenWithTimeout": { + fields: fields{ + c: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + cr := obj.(*rbacv1.ClusterRole) + cr.Rules = []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets", "configmaps", "events"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"apps", "extensions"}, + Resources: []string{"deployments"}, + Verbs: []string{"get"}, + }, + { + APIGroups: []string{"apps"}, + Resources: []string{"deployments"}, + Verbs: []string{"list"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + ResourceNames: []string{"this-one-really-cool-pod"}, + Verbs: []string{"*"}, + }, + } + return nil + }), + }, + }, + args: args{ + ctx: func() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + return ctx + }(), + requests: []rbacv1.PolicyRule{ + // Allowed - we allow * on secrets. + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{"*"}, + }, + // Allowed - we allow * on configmaps. + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get", "list", "watch"}, + }, + // Rejected - we don't allow get on extensions/deployments. + { + APIGroups: []string{"extensions"}, + Resources: []string{"deployments"}, + Verbs: []string{"get", "list"}, + }, + // Allowed - we allow get and list on apps/deployments. + { + APIGroups: []string{"apps"}, + Resources: []string{"deployments"}, + Verbs: []string{"get", "list"}, + }, + // Rejected - we only allow access to really cool pods. 
+ { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"get", "list"}, + }, + }, + }, + want: want{ + err: errors.Wrap(context.Canceled, errExpandClusterRoleRules), + }, + }, } for name, tc := range cases { diff --git a/internal/dag/dag.go b/internal/dag/dag.go index 5dc2b9ec6..37d3e2ac5 100644 --- a/internal/dag/dag.go +++ b/internal/dag/dag.go @@ -213,6 +213,9 @@ func (d *MapDag) visit(name string, neighbors []Node, stack map[string]bool, vis stack[name] = true for _, n := range neighbors { if !visited[n.Identifier()] { + if _, ok := d.nodes[n.Identifier()]; !ok { + return errors.Errorf("node %q does not exist", n.Identifier()) + } if err := d.visit(n.Identifier(), d.nodes[n.Identifier()].Neighbors(), stack, visited, results); err != nil { return err } diff --git a/internal/dag/fuzz_test.go b/internal/dag/fuzz_test.go index 5fd2e7d88..5d7700ae8 100644 --- a/internal/dag/fuzz_test.go +++ b/internal/dag/fuzz_test.go @@ -43,6 +43,9 @@ func (s *SimpleFuzzNode) AddNeighbors(nodes ...Node) error { if !ok { return errors.New("not a simple node") } + if s.NeighborsField == nil { + s.NeighborsField = make(map[string]SimpleFuzzNode) + } s.NeighborsField[sn.Identifier()] = *sn } return nil @@ -71,8 +74,29 @@ func FuzzDag(f *testing.F) { if err != nil { return } + for _, n := range nodes { + if n.NeighborsField == nil { + n.NeighborsField = make(map[string]SimpleFuzzNode) + } + } d := NewMapDag() _, _ = d.Init(toNodesFuzz(nodes)) + identifier, err := c.GetString() + if err != nil { + return + } + d.Sort() + _, _ = d.TraceNode(identifier) + d.Sort() + from, err := c.GetString() + if err != nil { + return + } + fuzzNode := &SimpleFuzzNode{} + c.GenerateStruct(fuzzNode) + _, _ = d.AddEdge(from, fuzzNode) + d.Sort() + d.NodeNeighbors(identifier) }) } diff --git a/internal/initializer/crds_migrator.go b/internal/initializer/crds_migrator.go new file mode 100644 index 000000000..0a7987dc8 --- /dev/null +++ b/internal/initializer/crds_migrator.go @@ -0,0 +1,112 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package initializer + +import ( + "context" + + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/errors" +) + +// NewCoreCRDsMigrator returns a new *CoreCRDsMigrator. +func NewCoreCRDsMigrator(crdName, sourceVersion string) *CoreCRDsMigrator { + c := &CoreCRDsMigrator{ + crdName: crdName, + oldVersion: sourceVersion, + } + return c +} + +// CoreCRDsMigrator makes sure the CRDs are using the latest storage version. +type CoreCRDsMigrator struct { + crdName string + oldVersion string +} + +// Run applies all CRDs in the given directory. 
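// (Editorial note, grounded in the function body below.) More precisely, Run
// performs a storage-version migration for the configured CRD: it lists every
// stored object, applies an empty merge patch so the API server rewrites each
// one at the current storage version, e.g.
//
//   kube.Patch(ctx, &res, client.RawPatch(types.MergePatchType, []byte(`{}`)))
//
// and then prunes Status.StoredVersions down to the single current version.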
+func (c *CoreCRDsMigrator) Run(ctx context.Context, kube client.Client) error { //nolint:gocyclo // TODO(phisco) refactor + var crd extv1.CustomResourceDefinition + if err := kube.Get(ctx, client.ObjectKey{Name: c.crdName}, &crd); err != nil { + if kerrors.IsNotFound(err) { + // nothing to do + return nil + } + return errors.Wrapf(err, "cannot get %s crd", c.crdName) + } + // no old version in the crd, nothing to do + if !sets.NewString(crd.Status.StoredVersions...).Has(c.oldVersion) { + return nil + } + // we need to patch all resources to the new storage version + var storageVersion string + for _, v := range crd.Spec.Versions { + if v.Storage { + storageVersion = v.Name + break + } + } + var resources = unstructured.UnstructuredList{} + resources.SetGroupVersionKind(schema.GroupVersionKind{ + Group: crd.Spec.Group, + Version: storageVersion, + Kind: crd.Spec.Names.ListKind, + }) + var continueToken string + for { + if err := kube.List(ctx, &resources, + client.Limit(500), + client.Continue(continueToken), + ); err != nil { + return errors.Wrapf(err, "cannot list %s", resources.GroupVersionKind().String()) + } + for i := range resources.Items { + // apply empty patch for storage version upgrade + res := resources.Items[i] + if err := kube.Patch(ctx, &res, client.RawPatch(types.MergePatchType, []byte(`{}`))); err != nil { + return errors.Wrapf(err, "cannot patch %s %q", crd.Spec.Names.Kind, res.GetName()) + } + } + continueToken = resources.GetContinue() + if continueToken == "" { + break + } + } + + origCrd := crd.DeepCopy() + crd.Status.StoredVersions = []string{storageVersion} + if err := kube.Status().Patch(ctx, &crd, client.MergeFrom(origCrd)); err != nil { + return errors.Wrapf(err, "couldn't update %s crd", c.crdName) + } + + // One more check just to be sure we actually updated the crd + if err := kube.Get(ctx, client.ObjectKey{Name: c.crdName}, &crd); err != nil { + return errors.Wrapf(err, "cannot get %s crd to check", c.crdName) + } + if len(crd.Status.StoredVersions) != 1 || crd.Status.StoredVersions[0] != storageVersion { + return errors.Errorf("was expecting CRD %q to only have %s, got instead: %v", c.crdName, storageVersion, crd.Status.StoredVersions) + } + + return nil +} diff --git a/internal/initializer/initializer.go b/internal/initializer/initializer.go index 1eb033c64..c2fab199e 100644 --- a/internal/initializer/initializer.go +++ b/internal/initializer/initializer.go @@ -19,6 +19,7 @@ package initializer import ( "context" + "fmt" "reflect" "sigs.k8s.io/controller-runtime/pkg/client" @@ -47,10 +48,23 @@ type Initializer struct { // Init does all operations necessary for controllers and webhooks to work. 
func (c *Initializer) Init(ctx context.Context) error { for _, s := range c.steps { + if s == nil { + continue + } if err := s.Run(ctx, c.kube); err != nil { return err } - c.log.Info("Step has been completed", "Name", reflect.TypeOf(s).Elem().Name()) + t := reflect.TypeOf(s) + var name string + if t != nil { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + name = t.Name() + } else { + name = fmt.Sprintf("%T", s) + } + c.log.Info("Step has been completed", "Name", name) } return nil } diff --git a/internal/initializer/installer_test.go b/internal/initializer/installer_test.go index cc6f64373..6a4ffd62b 100644 --- a/internal/initializer/installer_test.go +++ b/internal/initializer/installer_test.go @@ -34,12 +34,12 @@ import ( ) const ( - errGetProviderFmt = "unexpected name in provider get: %s" - errPatchProviderFmt = "unexpected name in provider update: %s" - errPatchProviderSourceFmt = "unexpected source in provider update: %s" - errGetConfigurationFmt = "unexpected name in configuration get: %s" - errPatchConfigurationFmt = "unexpected name in configuration update: %s" - errPatchConfigurationSourceFmt = "unexpected source in configuration update: %s" + errFmtGetProvider = "unexpected name in provider get: %s" + errFmtPatchProvider = "unexpected name in provider update: %s" + errFmtPatchProviderSource = "unexpected source in provider update: %s" + errFmtGetConfiguration = "unexpected name in configuration get: %s" + errFmtPatchConfiguration = "unexpected name in configuration update: %s" + errFmtPatchConfigurationSource = "unexpected source in configuration update: %s" ) var errBoom = errors.New("boom") @@ -111,11 +111,11 @@ func TestInstaller(t *testing.T) { switch obj.(type) { case *v1.Provider: if key.Name != p1Name { - t.Errorf(errGetProviderFmt, key.Name) + t.Errorf(errFmtGetProvider, key.Name) } case *v1.Configuration: if key.Name != c1Name { - t.Errorf(errGetConfigurationFmt, key.Name) + t.Errorf(errFmtGetConfiguration, key.Name) } default: t.Errorf("unexpected type") @@ -126,11 +126,11 @@ func TestInstaller(t *testing.T) { switch obj.(type) { case *v1.Provider: if obj.GetName() != p1Name { - t.Errorf(errPatchProviderFmt, obj.GetName()) + t.Errorf(errFmtPatchProvider, obj.GetName()) } case *v1.Configuration: if obj.GetName() != c1Name { - t.Errorf(errPatchConfigurationFmt, obj.GetName()) + t.Errorf(errFmtPatchConfiguration, obj.GetName()) } default: t.Errorf("unexpected type") @@ -186,11 +186,11 @@ func TestInstaller(t *testing.T) { switch obj.(type) { case *v1.Provider: if key.Name != p1Existing { - t.Errorf(errGetProviderFmt, key.Name) + t.Errorf(errFmtGetProvider, key.Name) } case *v1.Configuration: if key.Name != c1Existing { - t.Errorf(errGetConfigurationFmt, key.Name) + t.Errorf(errFmtGetConfiguration, key.Name) } default: t.Errorf("unexpected type") @@ -201,17 +201,17 @@ func TestInstaller(t *testing.T) { switch o := obj.(type) { case *v1.Provider: if o.GetName() != p1Existing { - t.Errorf(errPatchProviderFmt, o.GetName()) + t.Errorf(errFmtPatchProvider, o.GetName()) } if o.GetSource() != p1 { - t.Errorf(errPatchProviderSourceFmt, o.GetSource()) + t.Errorf(errFmtPatchProviderSource, o.GetSource()) } case *v1.Configuration: if o.GetName() != c1Existing { - t.Errorf(errPatchConfigurationFmt, o.GetName()) + t.Errorf(errFmtPatchConfiguration, o.GetName()) } if o.GetSource() != c1 { - t.Errorf(errPatchConfigurationSourceFmt, o.GetSource()) + t.Errorf(errFmtPatchConfigurationSource, o.GetSource()) } default: t.Errorf("unexpected type") @@ -233,11 +233,11 @@ func 
TestInstaller(t *testing.T) { switch obj.(type) { case *v1.Provider: if key.Name != p1Name { - t.Errorf(errGetProviderFmt, key.Name) + t.Errorf(errFmtGetProvider, key.Name) } case *v1.Configuration: if key.Name != c1Name { - t.Errorf(errGetConfigurationFmt, key.Name) + t.Errorf(errFmtGetConfiguration, key.Name) } default: t.Errorf("unexpected type") @@ -293,11 +293,11 @@ func TestInstaller(t *testing.T) { switch obj.(type) { case *v1.Provider: if key.Name != p1Name { - t.Errorf(errGetProviderFmt, key.Name) + t.Errorf(errFmtGetProvider, key.Name) } case *v1.Configuration: if key.Name != c1Name { - t.Errorf(errGetConfigurationFmt, key.Name) + t.Errorf(errFmtGetConfiguration, key.Name) } default: t.Errorf("unexpected type") diff --git a/internal/initializer/webhook_configurations.go b/internal/initializer/webhook_configurations.go index 52d6be461..ad69aa3fc 100644 --- a/internal/initializer/webhook_configurations.go +++ b/internal/initializer/webhook_configurations.go @@ -18,7 +18,6 @@ package initializer import ( "context" - "reflect" "github.com/spf13/afero" admv1 "k8s.io/api/admissionregistration/v1" @@ -124,7 +123,7 @@ func (c *WebhookConfigurations) Run(ctx context.Context, kube client.Client) err // See https://github.com/kubernetes-sigs/controller-tools/issues/658 conf.SetName("crossplane") default: - return errors.Errorf("only MutatingWebhookConfiguration and ValidatingWebhookConfiguration kinds are accepted, got %s", reflect.TypeOf(obj).String()) + return errors.Errorf("only MutatingWebhookConfiguration and ValidatingWebhookConfiguration kinds are accepted, got %T", obj) } if err := pa.Apply(ctx, obj.(client.Object)); err != nil { return errors.Wrap(err, errApplyWebhookConfiguration) diff --git a/internal/oci/certs.go b/internal/oci/certs.go new file mode 100644 index 000000000..d1e3d2d51 --- /dev/null +++ b/internal/oci/certs.go @@ -0,0 +1,48 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package oci + +import ( + "crypto/x509" + "os" + "path/filepath" + + "github.com/crossplane/crossplane-runtime/pkg/errors" +) + +// ParseCertificatesFromPath parses PEM file containing extra x509 +// certificates(s) and combines them with the built in root CA CertPool. 
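// (Illustrative usage; the file path is a hypothetical example and the caller
// code is not part of this change.) Combined with the WithCustomCA option added
// to internal/oci/pull.go in this diff, the parsed pool can be wired into image
// pulls roughly like:
//
//   rootCAs, err := ParseCertificatesFromPath("/certs/ca-bundle.pem")
//   if err != nil {
//       return err
//   }
//   img, err := (&RemoteClient{}).Image(ctx, ref, WithCustomCA(rootCAs))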
+func ParseCertificatesFromPath(path string) (*x509.CertPool, error) { + // Get the SystemCertPool, continue with an empty pool on error + rootCAs, _ := x509.SystemCertPool() + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } + + // Read in the cert file + certs, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, errors.Wrapf(err, "Failed to append %q to RootCAs", path) + } + + // Append our cert to the system pool + if ok := rootCAs.AppendCertsFromPEM(certs); !ok { + return nil, errors.Errorf("No certificates could be parsed from %q", path) + } + + return rootCAs, nil +} diff --git a/internal/oci/pull.go b/internal/oci/pull.go index 69e86087c..4ec4af910 100644 --- a/internal/oci/pull.go +++ b/internal/oci/pull.go @@ -18,6 +18,9 @@ package oci import ( "context" + "crypto/tls" + "crypto/x509" + "net/http" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" @@ -86,8 +89,9 @@ func (a ImagePullAuth) Authorization() (*authn.AuthConfig, error) { // ImageClientOptions configure an ImageClient. type ImageClientOptions struct { - pull ImagePullPolicy - auth *ImagePullAuth + pull ImagePullPolicy + auth *ImagePullAuth + transport *http.Transport } func parse(o ...ImageClientOption) ImageClientOptions { @@ -117,6 +121,14 @@ func WithPullAuth(a *ImagePullAuth) ImageClientOption { } } +// WithCustomCA adds given root certificates to tls client configuration +func WithCustomCA(rootCAs *x509.CertPool) ImageClientOption { + return func(c *ImageClientOptions) { + c.transport = remote.DefaultTransport.(*http.Transport).Clone() + c.transport.TLSClientConfig = &tls.Config{RootCAs: rootCAs, MinVersion: tls.VersionTLS12} + } +} + // An ImageClient is an OCI registry client. type ImageClient interface { // Image pulls an OCI image. @@ -141,13 +153,17 @@ type RemoteClient struct{} // Image fetches an image manifest. The returned image lazily pulls its layers. func (i *RemoteClient) Image(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) { opts := parse(o...) + iOpts := []remote.Option{remote.WithContext(ctx)} if opts.auth != nil { - return remote.Image(ref, remote.WithContext(ctx), remote.WithAuth(opts.auth)) + iOpts = append(iOpts, remote.WithAuth(opts.auth)) + } + if opts.transport != nil { + iOpts = append(iOpts, remote.WithTransport(opts.transport)) } if opts.pull == ImagePullPolicyNever { return nil, errors.New(errPullNever) } - return remote.Image(ref, remote.WithContext(ctx)) + return remote.Image(ref, iOpts...) } // A CachingPuller pulls OCI images. 
Images are pulled either from a local cache diff --git a/internal/oci/pull_test.go b/internal/oci/pull_test.go index 96a0ccd07..1580e11ad 100644 --- a/internal/oci/pull_test.go +++ b/internal/oci/pull_test.go @@ -18,6 +18,7 @@ package oci import ( "context" + "crypto/x509" "testing" "github.com/google/go-cmp/cmp" @@ -298,6 +299,44 @@ func TestImage(t *testing.T) { i: &MockImage{}, }, }, + "PullWithCustomCA": { + reason: "We should return a pulled and cached image.", + p: NewCachingPuller( + &MockHashCache{ + MockHash: func(r name.Reference) (ociv1.Hash, error) { + return ociv1.Hash{}, errors.New("this error should not be returned") + }, + MockWriteHash: func(r name.Reference, h ociv1.Hash) error { + return nil + }, + }, + &MockImageCache{ + MockWriteImage: func(img ociv1.Image) error { return nil }, + MockImage: func(h ociv1.Hash) (ociv1.Image, error) { return &MockImage{}, nil }, + }, + &MockImageClient{ + MockImage: func(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) { + if len(o) != 1 { + return nil, errors.New("the number of options should be one") + } + c := &ImageClientOptions{} + o[0](c) + if c.transport == nil { + return nil, errors.New("Transport should be set") + } + return &MockImage{ + MockDigest: func() (ociv1.Hash, error) { return ociv1.Hash{}, nil }, + }, nil + }, + }, + ), + args: args{ + o: []ImageClientOption{WithCustomCA(&x509.CertPool{})}, + }, + want: want{ + i: &MockImage{}, + }, + }, "IfNotPresentTriesCacheFirst": { reason: "The IfNotPresent policy should try to read from cache first.", p: NewCachingPuller( diff --git a/internal/oci/spec/spec.go b/internal/oci/spec/spec.go index 3331c8b04..843a71c31 100644 --- a/internal/oci/spec/spec.go +++ b/internal/oci/spec/spec.go @@ -110,6 +110,12 @@ func New(o ...Option) (*runtime.Spec, error) { Source: "tmpfs", Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, }, + { + Type: "tmpfs", + Destination: "/tmp", + Source: "tmp", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, { Type: "bind", Destination: "/sys", diff --git a/internal/xcrd/crd.go b/internal/xcrd/crd.go index 1ca31b466..c8f65e0a4 100644 --- a/internal/xcrd/crd.go +++ b/internal/xcrd/crd.go @@ -42,7 +42,7 @@ const ( ) const ( - errFmtGetProps = "cannot get %q properties from validation schema" + errFmtGenCrd = "cannot generate CRD for %q %q" errParseValidation = "cannot parse validation schema" errInvalidClaimNames = "invalid resource claim names" errMissingClaimNames = "missing names" @@ -63,7 +63,7 @@ func ForCompositeResource(xrd *v1.CompositeResourceDefinition) (*extv1.CustomRes } crd.SetName(xrd.GetName()) - crd.SetLabels(xrd.GetLabels()) + setCrdMetadata(crd, xrd) crd.SetOwnerReferences([]metav1.OwnerReference{meta.AsController( meta.TypedReferenceTo(xrd, v1.CompositeResourceDefinitionGroupVersionKind), )}) @@ -71,48 +71,15 @@ func ForCompositeResource(xrd *v1.CompositeResourceDefinition) (*extv1.CustomRes crd.Spec.Names.Categories = append(crd.Spec.Names.Categories, CategoryComposite) for i, vr := range xrd.Spec.Versions { - crd.Spec.Versions[i] = extv1.CustomResourceDefinitionVersion{ - Name: vr.Name, - Served: vr.Served, - Storage: vr.Referenceable, - Deprecated: pointer.BoolDeref(vr.Deprecated, false), - DeprecationWarning: vr.DeprecationWarning, - AdditionalPrinterColumns: append(vr.AdditionalPrinterColumns, CompositeResourcePrinterColumns()...), - Schema: &extv1.CustomResourceValidation{ - OpenAPIV3Schema: BaseProps(), - }, - Subresources: 
&extv1.CustomResourceSubresources{ - Status: &extv1.CustomResourceSubresourceStatus{}, - }, - } - - p, required, err := getProps("spec", vr.Schema) + crdv, err := genCrdVersion(vr) if err != nil { - return nil, errors.Wrapf(err, errFmtGetProps, "spec") - } - specProps := crd.Spec.Versions[i].Schema.OpenAPIV3Schema.Properties["spec"] - specProps.Required = append(specProps.Required, required...) - for k, v := range p { - specProps.Properties[k] = v + return nil, errors.Wrapf(err, errFmtGenCrd, "Composite Resource", xrd.Name) } + crdv.AdditionalPrinterColumns = append(crdv.AdditionalPrinterColumns, CompositeResourcePrinterColumns()...) for k, v := range CompositeResourceSpecProps() { - specProps.Properties[k] = v - } - crd.Spec.Versions[i].Schema.OpenAPIV3Schema.Properties["spec"] = specProps - - statusP, statusRequired, err := getProps("status", vr.Schema) - if err != nil { - return nil, errors.Wrapf(err, errFmtGetProps, "status") - } - statusProps := crd.Spec.Versions[i].Schema.OpenAPIV3Schema.Properties["status"] - statusProps.Required = statusRequired - for k, v := range statusP { - statusProps.Properties[k] = v - } - for k, v := range CompositeResourceStatusProps() { - statusProps.Properties[k] = v + crdv.Schema.OpenAPIV3Schema.Properties["spec"].Properties[k] = v } - crd.Spec.Versions[i].Schema.OpenAPIV3Schema.Properties["status"] = statusProps + crd.Spec.Versions[i] = *crdv } return crd, nil @@ -136,7 +103,7 @@ func ForCompositeResourceClaim(xrd *v1.CompositeResourceDefinition) (*extv1.Cust } crd.SetName(xrd.Spec.ClaimNames.Plural + "." + xrd.Spec.Group) - crd.SetLabels(xrd.GetLabels()) + setCrdMetadata(crd, xrd) crd.SetOwnerReferences([]metav1.OwnerReference{meta.AsController( meta.TypedReferenceTo(xrd, v1.CompositeResourceDefinitionGroupVersionKind), )}) @@ -144,53 +111,67 @@ func ForCompositeResourceClaim(xrd *v1.CompositeResourceDefinition) (*extv1.Cust crd.Spec.Names.Categories = append(crd.Spec.Names.Categories, CategoryClaim) for i, vr := range xrd.Spec.Versions { - crd.Spec.Versions[i] = extv1.CustomResourceDefinitionVersion{ - Name: vr.Name, - Served: vr.Served, - Storage: vr.Referenceable, - Deprecated: pointer.BoolDeref(vr.Deprecated, false), - DeprecationWarning: vr.DeprecationWarning, - AdditionalPrinterColumns: append(vr.AdditionalPrinterColumns, CompositeResourceClaimPrinterColumns()...), - Schema: &extv1.CustomResourceValidation{ - OpenAPIV3Schema: BaseProps(), - }, - Subresources: &extv1.CustomResourceSubresources{ - Status: &extv1.CustomResourceSubresourceStatus{}, - }, - } - - p, required, err := getProps("spec", vr.Schema) + crdv, err := genCrdVersion(vr) if err != nil { - return nil, errors.Wrapf(err, errFmtGetProps, "spec") - } - specProps := crd.Spec.Versions[i].Schema.OpenAPIV3Schema.Properties["spec"] - specProps.Required = append(specProps.Required, required...) - for k, v := range p { - specProps.Properties[k] = v + return nil, errors.Wrapf(err, errFmtGenCrd, "Composite Resource Claim", xrd.Name) } + crdv.AdditionalPrinterColumns = append(crdv.AdditionalPrinterColumns, CompositeResourceClaimPrinterColumns()...) 
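// (Illustrative note; the schema fragment is taken from the test case further
// down in this diff, not from this function.) genCrdVersion now carries the XRD
// schema's descriptions and x-kubernetes-validations through to the generated
// CRD, so a rule such as
//
//   {"message": "Cannot change engine version",
//    "rule": "self.engineVersion == oldSelf.engineVersion"}
//
// declared on the XRD's spec schema ends up in the CRD version's
// Schema.OpenAPIV3Schema.Properties["spec"].XValidations, alongside the
// claim-specific properties merged in below.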
for k, v := range CompositeResourceClaimSpecProps() { - specProps.Properties[k] = v - } - crd.Spec.Versions[i].Schema.OpenAPIV3Schema.Properties["spec"] = specProps - - statusP, statusRequired, err := getProps("status", vr.Schema) - if err != nil { - return nil, errors.Wrapf(err, errFmtGetProps, "status") - } - statusProps := crd.Spec.Versions[i].Schema.OpenAPIV3Schema.Properties["status"] - statusProps.Required = statusRequired - for k, v := range statusP { - statusProps.Properties[k] = v + crdv.Schema.OpenAPIV3Schema.Properties["spec"].Properties[k] = v } - for k, v := range CompositeResourceStatusProps() { - statusProps.Properties[k] = v - } - crd.Spec.Versions[i].Schema.OpenAPIV3Schema.Properties["status"] = statusProps + crd.Spec.Versions[i] = *crdv } return crd, nil } +func genCrdVersion(vr v1.CompositeResourceDefinitionVersion) (*extv1.CustomResourceDefinitionVersion, error) { + crdv := extv1.CustomResourceDefinitionVersion{ + Name: vr.Name, + Served: vr.Served, + Storage: vr.Referenceable, + Deprecated: pointer.BoolDeref(vr.Deprecated, false), + DeprecationWarning: vr.DeprecationWarning, + AdditionalPrinterColumns: vr.AdditionalPrinterColumns, + Schema: &extv1.CustomResourceValidation{ + OpenAPIV3Schema: BaseProps(), + }, + Subresources: &extv1.CustomResourceSubresources{ + Status: &extv1.CustomResourceSubresourceStatus{}, + }, + } + s, err := parseSchema(vr.Schema) + if err != nil { + return nil, errors.Wrapf(err, errParseValidation) + } + crdv.Schema.OpenAPIV3Schema.Description = s.Description + + xSpec := s.Properties["spec"] + cSpec := crdv.Schema.OpenAPIV3Schema.Properties["spec"] + cSpec.Required = append(cSpec.Required, xSpec.Required...) + cSpec.XValidations = append(cSpec.XValidations, xSpec.XValidations...) + + cSpec.Description = xSpec.Description + for k, v := range xSpec.Properties { + cSpec.Properties[k] = v + } + crdv.Schema.OpenAPIV3Schema.Properties["spec"] = cSpec + + xStatus := s.Properties["status"] + cStatus := crdv.Schema.OpenAPIV3Schema.Properties["status"] + cStatus.Required = xStatus.Required + cStatus.XValidations = xStatus.XValidations + cStatus.Description = xStatus.Description + for k, v := range xStatus.Properties { + cStatus.Properties[k] = v + } + for k, v := range CompositeResourceStatusProps() { + cStatus.Properties[k] = v + } + crdv.Schema.OpenAPIV3Schema.Properties["status"] = cStatus + return &crdv, nil +} + func validateClaimNames(d *v1.CompositeResourceDefinition) error { if d.Spec.ClaimNames == nil { return errors.New(errMissingClaimNames) @@ -215,22 +196,37 @@ func validateClaimNames(d *v1.CompositeResourceDefinition) error { return nil } -func getProps(field string, v *v1.CompositeResourceValidation) (map[string]extv1.JSONSchemaProps, []string, error) { +func parseSchema(v *v1.CompositeResourceValidation) (*extv1.JSONSchemaProps, error) { if v == nil { - return nil, nil, nil + return nil, nil } s := &extv1.JSONSchemaProps{} if err := json.Unmarshal(v.OpenAPIV3Schema.Raw, s); err != nil { - return nil, nil, errors.Wrap(err, errParseValidation) + return nil, errors.Wrap(err, errParseValidation) } + return s, nil +} - spec, ok := s.Properties[field] - if !ok { - return nil, nil, nil +// setCrdMetadata sets the labels and annotations on the CRD. 
+func setCrdMetadata(crd *extv1.CustomResourceDefinition, xrd *v1.CompositeResourceDefinition) *extv1.CustomResourceDefinition { + crd.SetLabels(xrd.GetLabels()) + if xrd.Spec.Metadata != nil { + if xrd.Spec.Metadata.Labels != nil { + inheritedLabels := crd.GetLabels() + if inheritedLabels == nil { + inheritedLabels = map[string]string{} + } + for k, v := range xrd.Spec.Metadata.Labels { + inheritedLabels[k] = v + } + crd.SetLabels(inheritedLabels) + } + if xrd.Spec.Metadata.Annotations != nil { + crd.SetAnnotations(xrd.Spec.Metadata.Annotations) + } } - - return spec.Properties, spec.Required, nil + return crd } // IsEstablished is a helper function to check whether api-server is ready diff --git a/internal/xcrd/crd_test.go b/internal/xcrd/crd_test.go index 7ae3bc9ec..3fec78d33 100644 --- a/internal/xcrd/crd_test.go +++ b/internal/xcrd/crd_test.go @@ -82,39 +82,55 @@ func TestForCompositeResource(t *testing.T) { schema := ` { - "required": [ - "spec" - ], - "properties": { - "spec": { - "required": [ - "storageGB", - "engineVersion" - ], - "properties": { - "engineVersion": { - "enum": [ - "5.6", - "5.7" - ], - "type": "string" - }, - "storageGB": { - "type": "integer" - } - }, - "type": "object" - }, - "status": { - "properties": { - "phase": { - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" + "required": [ + "spec" + ], + "properties": { + "spec": { + "description": "Specification of the resource.", + "required": [ + "storageGB", + "engineVersion" + ], + "properties": { + "engineVersion": { + "enum": [ + "5.6", + "5.7" + ], + "type": "string" + }, + "storageGB": { + "type": "integer", + "description": "Pretend this is useful." + } + }, + "x-kubernetes-validations": [ + { + "message": "Cannot change engine version", + "rule": "self.engineVersion == oldSelf.engineVersion" + } + ], + "type": "object" + }, + "status": { + "properties": { + "phase": { + "type": "string" + } + }, + "x-kubernetes-validations": [ + { + "message": "Phase is required once set", + "rule": "!has(oldSelf.phase) || has(self.phase)" + } + ], + "type": "object", + "description": "Status of the resource." + } + }, + "type": "object", + "description": "What the resource is for." 
}` d := &v1.CompositeResourceDefinition{ @@ -192,8 +208,9 @@ func TestForCompositeResource(t *testing.T) { }, Schema: &extv1.CustomResourceValidation{ OpenAPIV3Schema: &extv1.JSONSchemaProps{ - Type: "object", - Required: []string{"spec"}, + Type: "object", + Description: "What the resource is for.", + Required: []string{"spec"}, Properties: map[string]extv1.JSONSchemaProps{ "apiVersion": { Type: "string", @@ -207,11 +224,12 @@ func TestForCompositeResource(t *testing.T) { Type: "object", }, "spec": { - Type: "object", - Required: []string{"storageGB", "engineVersion"}, + Type: "object", + Required: []string{"storageGB", "engineVersion"}, + Description: "Specification of the resource.", Properties: map[string]extv1.JSONSchemaProps{ // From CRDSpecTemplate.Validation - "storageGB": {Type: "integer"}, + "storageGB": {Type: "integer", Description: "Pretend this is useful."}, "engineVersion": { Type: "string", Enum: []extv1.JSON{ @@ -353,9 +371,16 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + XValidations: extv1.ValidationRules{ + { + Message: "Cannot change engine version", + Rule: "self.engineVersion == oldSelf.engineVersion", + }, + }, }, "status": { - Type: "object", + Type: "object", + Description: "Status of the resource.", Properties: map[string]extv1.JSONSchemaProps{ "phase": {Type: "string"}, @@ -384,6 +409,12 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + XValidations: extv1.ValidationRules{ + { + Message: "Phase is required once set", + Rule: "!has(oldSelf.phase) || has(self.phase)", + }, + }, }, }, }, @@ -402,155 +433,19 @@ func TestForCompositeResource(t *testing.T) { } } -func TestValidateClaimNames(t *testing.T) { - cases := map[string]struct { - d *v1.CompositeResourceDefinition - want error - }{ - "MissingClaimNames": { - d: &v1.CompositeResourceDefinition{}, - want: errors.New(errMissingClaimNames), - }, - "KindConflict": { - d: &v1.CompositeResourceDefinition{ - Spec: v1.CompositeResourceDefinitionSpec{ - ClaimNames: &extv1.CustomResourceDefinitionNames{ - Kind: "a", - ListKind: "a", - Singular: "a", - Plural: "a", - }, - Names: extv1.CustomResourceDefinitionNames{ - Kind: "a", - ListKind: "b", - Singular: "b", - Plural: "b", - }, - }, - }, - want: errors.Errorf(errFmtConflictingClaimName, "a"), - }, - "ListKindConflict": { - d: &v1.CompositeResourceDefinition{ - Spec: v1.CompositeResourceDefinitionSpec{ - ClaimNames: &extv1.CustomResourceDefinitionNames{ - Kind: "a", - ListKind: "a", - Singular: "a", - Plural: "a", - }, - Names: extv1.CustomResourceDefinitionNames{ - Kind: "b", - ListKind: "a", - Singular: "b", - Plural: "b", - }, - }, - }, - want: errors.Errorf(errFmtConflictingClaimName, "a"), - }, - "SingularConflict": { - d: &v1.CompositeResourceDefinition{ - Spec: v1.CompositeResourceDefinitionSpec{ - ClaimNames: &extv1.CustomResourceDefinitionNames{ - Kind: "a", - ListKind: "a", - Singular: "a", - Plural: "a", - }, - Names: extv1.CustomResourceDefinitionNames{ - Kind: "b", - ListKind: "b", - Singular: "a", - Plural: "b", - }, - }, - }, - want: errors.Errorf(errFmtConflictingClaimName, "a"), - }, - "PluralConflict": { - d: &v1.CompositeResourceDefinition{ - Spec: v1.CompositeResourceDefinitionSpec{ - ClaimNames: &extv1.CustomResourceDefinitionNames{ - Kind: "a", - ListKind: "a", - Singular: "a", - Plural: "a", - }, - Names: extv1.CustomResourceDefinitionNames{ - Kind: "b", - ListKind: "b", - Singular: "b", - Plural: "a", - Categories: []string{CategoryClaim}, - }, - }, - }, - want: errors.Errorf(errFmtConflictingClaimName, "a"), - }, - } - - 
for name, tc := range cases { - t.Run(name, func(t *testing.T) { - got := validateClaimNames(tc.d) - if diff := cmp.Diff(tc.want, got, test.EquateErrors()); diff != "" { - t.Errorf("validateClaimNames(...): -want, +got:\n%s", diff) - } - }) - } -} - -func TestForCompositeResourceClaim(t *testing.T) { +func TestForCompositeResourceEmptyXrd(t *testing.T) { name := "coolcomposites.example.org" labels := map[string]string{"cool": "very"} annotations := map[string]string{"example.org/cool": "very"} group := "example.org" version := "v1" - kind := "CoolComposite" listKind := "CoolCompositeList" singular := "coolcomposite" plural := "coolcomposites" - claimKind := "CoolClaim" - claimListKind := "CoolClaimList" - claimSingular := "coolclaim" - claimPlural := "coolclaims" - - schema := ` -{ - "properties": { - "spec": { - "required": [ - "storageGB", - "engineVersion" - ], - "properties": { - "engineVersion": { - "enum": [ - "5.6", - "5.7" - ], - "type": "string" - }, - "storageGB": { - "type": "integer" - } - }, - "type": "object" - }, - "status": { - "properties": { - "phase": { - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" -}` + schema := "{}" d := &v1.CompositeResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ @@ -567,12 +462,6 @@ func TestForCompositeResourceClaim(t *testing.T) { Kind: kind, ListKind: listKind, }, - ClaimNames: &extv1.CustomResourceDefinitionNames{ - Plural: claimPlural, - Singular: claimSingular, - Kind: claimKind, - ListKind: claimListKind, - }, Versions: []v1.CompositeResourceDefinitionVersion{{ Name: version, Referenceable: true, @@ -586,7 +475,7 @@ func TestForCompositeResourceClaim(t *testing.T) { want := &extv1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ - Name: claimPlural + "." + group, + Name: name, Labels: labels, OwnerReferences: []metav1.OwnerReference{ meta.AsController(meta.TypedReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind)), @@ -595,75 +484,817 @@ func TestForCompositeResourceClaim(t *testing.T) { Spec: extv1.CustomResourceDefinitionSpec{ Group: group, Names: extv1.CustomResourceDefinitionNames{ - Plural: claimPlural, - Singular: claimSingular, - Kind: claimKind, - ListKind: claimListKind, - Categories: []string{CategoryClaim}, + Plural: plural, + Singular: singular, + Kind: kind, + ListKind: listKind, + Categories: []string{CategoryComposite}, }, - Scope: extv1.NamespaceScoped, - Versions: []extv1.CustomResourceDefinitionVersion{ - { - Name: version, - Served: true, - Storage: true, - Subresources: &extv1.CustomResourceSubresources{ - Status: &extv1.CustomResourceSubresourceStatus{}, + Scope: extv1.ClusterScoped, + Versions: []extv1.CustomResourceDefinitionVersion{{ + Name: version, + Served: true, + Storage: true, + Subresources: &extv1.CustomResourceSubresources{ + Status: &extv1.CustomResourceSubresourceStatus{}, + }, + AdditionalPrinterColumns: []extv1.CustomResourceColumnDefinition{ + { + Name: "SYNCED", + Type: "string", + JSONPath: ".status.conditions[?(@.type=='Synced')].status", }, - AdditionalPrinterColumns: []extv1.CustomResourceColumnDefinition{ - { - Name: "SYNCED", - Type: "string", - JSONPath: ".status.conditions[?(@.type=='Synced')].status", - }, - { - Name: "READY", - Type: "string", - JSONPath: ".status.conditions[?(@.type=='Ready')].status", - }, - { - Name: "CONNECTION-SECRET", - Type: "string", - JSONPath: ".spec.writeConnectionSecretToRef.name", - }, - { - Name: "AGE", - Type: "date", - JSONPath: ".metadata.creationTimestamp", - }, + { + Name: "READY", + Type: "string", + 
JSONPath: ".status.conditions[?(@.type=='Ready')].status", }, - Schema: &extv1.CustomResourceValidation{ - OpenAPIV3Schema: &extv1.JSONSchemaProps{ - Type: "object", - Required: []string{"spec"}, - Properties: map[string]extv1.JSONSchemaProps{ - "apiVersion": { - Type: "string", - }, - "kind": { - Type: "string", - }, - "metadata": { - // NOTE(muvaf): api-server takes care of validating - // metadata. - Type: "object", - }, - "spec": { - Type: "object", - Required: []string{"storageGB", "engineVersion"}, - Properties: map[string]extv1.JSONSchemaProps{ - // From CRDSpecTemplate.Validation - "storageGB": {Type: "integer"}, - "engineVersion": { - Type: "string", - Enum: []extv1.JSON{ - {Raw: []byte(`"5.6"`)}, - {Raw: []byte(`"5.7"`)}, - }, - }, - "compositeDeletePolicy": { - Type: "string", - Enum: []extv1.JSON{{Raw: []byte(`"Background"`)}, + { + Name: "COMPOSITION", + Type: "string", + JSONPath: ".spec.compositionRef.name", + }, + { + Name: "AGE", + Type: "date", + JSONPath: ".metadata.creationTimestamp", + }, + }, + Schema: &extv1.CustomResourceValidation{ + OpenAPIV3Schema: &extv1.JSONSchemaProps{ + Type: "object", + Description: "", + Required: []string{"spec"}, + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": { + Type: "string", + }, + "kind": { + Type: "string", + }, + "metadata": { + // NOTE(muvaf): api-server takes care of validating + // metadata. + Type: "object", + }, + "spec": { + Type: "object", + Description: "", + Properties: map[string]extv1.JSONSchemaProps{ + // From CompositeResourceSpecProps() + "compositionRef": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + }, + }, + "compositionSelector": { + Type: "object", + Required: []string{"matchLabels"}, + Properties: map[string]extv1.JSONSchemaProps{ + "matchLabels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + }, + }, + "compositionRevisionRef": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + }, + }, + "compositionRevisionSelector": { + Type: "object", + Required: []string{"matchLabels"}, + Properties: map[string]extv1.JSONSchemaProps{ + "matchLabels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + }, + }, + "compositionUpdatePolicy": { + Type: "string", + Enum: []extv1.JSON{ + {Raw: []byte(`"Automatic"`)}, + {Raw: []byte(`"Manual"`)}, + }, + }, + "claimRef": { + Type: "object", + Required: []string{"apiVersion", "kind", "namespace", "name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": {Type: "string"}, + "kind": {Type: "string"}, + "namespace": {Type: "string"}, + "name": {Type: "string"}, + }, + }, + "environmentConfigRefs": { + Type: "array", + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": {Type: "string"}, + "name": {Type: "string"}, + "kind": {Type: "string"}, + }, + Required: []string{"apiVersion", "kind"}, + }, + }, + }, + "resourceRefs": { + Type: "array", + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": {Type: "string"}, + "name": {Type: "string"}, + "kind": {Type: "string"}, + }, + Required: 
[]string{"apiVersion", "kind"}, + }, + }, + }, + "publishConnectionDetailsTo": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + "configRef": { + Type: "object", + Default: &extv1.JSON{Raw: []byte(`{"name": "default"}`)}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": { + Type: "string", + }, + }, + }, + "metadata": { + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "labels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + "annotations": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + "type": { + Type: "string", + }, + }, + }, + }, + }, + "writeConnectionSecretToRef": { + Type: "object", + Required: []string{"name", "namespace"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + "namespace": {Type: "string"}, + }, + }, + }, + }, + "status": { + Type: "object", + Description: "", + Properties: map[string]extv1.JSONSchemaProps{ + + // From CompositeResourceStatusProps() + "conditions": { + Description: "Conditions of the resource.", + Type: "array", + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "object", + Required: []string{"lastTransitionTime", "reason", "status", "type"}, + Properties: map[string]extv1.JSONSchemaProps{ + "lastTransitionTime": {Type: "string", Format: "date-time"}, + "message": {Type: "string"}, + "reason": {Type: "string"}, + "status": {Type: "string"}, + "type": {Type: "string"}, + }, + }, + }, + }, + "connectionDetails": { + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "lastPublishedTime": {Type: "string", Format: "date-time"}, + }, + }, + }, + }, + }, + }, + }, + }}, + }, + } + + got, err := ForCompositeResource(d) + if err != nil { + t.Fatalf("ForCompositeResource(...): %s", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("ForCompositeResource(...): -want, +got:\n%s", diff) + } +} + +func TestValidateClaimNames(t *testing.T) { + cases := map[string]struct { + d *v1.CompositeResourceDefinition + want error + }{ + "MissingClaimNames": { + d: &v1.CompositeResourceDefinition{}, + want: errors.New(errMissingClaimNames), + }, + "KindConflict": { + d: &v1.CompositeResourceDefinition{ + Spec: v1.CompositeResourceDefinitionSpec{ + ClaimNames: &extv1.CustomResourceDefinitionNames{ + Kind: "a", + ListKind: "a", + Singular: "a", + Plural: "a", + }, + Names: extv1.CustomResourceDefinitionNames{ + Kind: "a", + ListKind: "b", + Singular: "b", + Plural: "b", + }, + }, + }, + want: errors.Errorf(errFmtConflictingClaimName, "a"), + }, + "ListKindConflict": { + d: &v1.CompositeResourceDefinition{ + Spec: v1.CompositeResourceDefinitionSpec{ + ClaimNames: &extv1.CustomResourceDefinitionNames{ + Kind: "a", + ListKind: "a", + Singular: "a", + Plural: "a", + }, + Names: extv1.CustomResourceDefinitionNames{ + Kind: "b", + ListKind: "a", + Singular: "b", + Plural: "b", + }, + }, + }, + want: errors.Errorf(errFmtConflictingClaimName, "a"), + }, + "SingularConflict": { + d: &v1.CompositeResourceDefinition{ + Spec: v1.CompositeResourceDefinitionSpec{ + ClaimNames: &extv1.CustomResourceDefinitionNames{ + Kind: "a", + ListKind: "a", + Singular: "a", + Plural: "a", + }, + Names: extv1.CustomResourceDefinitionNames{ + Kind: "b", + ListKind: "b", + Singular: "a", + Plural: "b", + }, + }, 
+ }, + want: errors.Errorf(errFmtConflictingClaimName, "a"), + }, + "PluralConflict": { + d: &v1.CompositeResourceDefinition{ + Spec: v1.CompositeResourceDefinitionSpec{ + ClaimNames: &extv1.CustomResourceDefinitionNames{ + Kind: "a", + ListKind: "a", + Singular: "a", + Plural: "a", + }, + Names: extv1.CustomResourceDefinitionNames{ + Kind: "b", + ListKind: "b", + Singular: "b", + Plural: "a", + Categories: []string{CategoryClaim}, + }, + }, + }, + want: errors.Errorf(errFmtConflictingClaimName, "a"), + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := validateClaimNames(tc.d) + if diff := cmp.Diff(tc.want, got, test.EquateErrors()); diff != "" { + t.Errorf("validateClaimNames(...): -want, +got:\n%s", diff) + } + }) + } +} + +func TestForCompositeResourceClaim(t *testing.T) { + name := "coolcomposites.example.org" + labels := map[string]string{"cool": "very"} + annotations := map[string]string{"example.org/cool": "very"} + + group := "example.org" + version := "v1" + + kind := "CoolComposite" + listKind := "CoolCompositeList" + singular := "coolcomposite" + plural := "coolcomposites" + + claimKind := "CoolClaim" + claimListKind := "CoolClaimList" + claimSingular := "coolclaim" + claimPlural := "coolclaims" + + schema := ` +{ + "properties": { + "spec": { + "description": "Specification of the resource.", + "required": [ + "storageGB", + "engineVersion" + ], + "properties": { + "engineVersion": { + "enum": [ + "5.6", + "5.7" + ], + "type": "string" + }, + "storageGB": { + "type": "integer", + "description": "Pretend this is useful." + } + }, + "x-kubernetes-validations": [ + { + "message": "Cannot change engine version", + "rule": "self.engineVersion == oldSelf.engineVersion" + } + ], + "type": "object" + }, + "status": { + "properties": { + "phase": { + "type": "string" + } + }, + "x-kubernetes-validations": [ + { + "message": "Phase is required once set", + "rule": "!has(oldSelf.phase) || has(self.phase)" + } + ], + "type": "object", + "description": "Status of the resource." + } + }, + "type": "object", + "description": "Description of the resource." +}` + + d := &v1.CompositeResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + Annotations: annotations, + UID: types.UID("you-you-eye-dee"), + }, + Spec: v1.CompositeResourceDefinitionSpec{ + Group: group, + Names: extv1.CustomResourceDefinitionNames{ + Plural: plural, + Singular: singular, + Kind: kind, + ListKind: listKind, + }, + ClaimNames: &extv1.CustomResourceDefinitionNames{ + Plural: claimPlural, + Singular: claimSingular, + Kind: claimKind, + ListKind: claimListKind, + }, + Versions: []v1.CompositeResourceDefinitionVersion{{ + Name: version, + Referenceable: true, + Served: true, + Schema: &v1.CompositeResourceValidation{ + OpenAPIV3Schema: runtime.RawExtension{Raw: []byte(schema)}, + }, + }}, + }, + } + + want := &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: claimPlural + "." 
+ group, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ + meta.AsController(meta.TypedReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind)), + }, + }, + Spec: extv1.CustomResourceDefinitionSpec{ + Group: group, + Names: extv1.CustomResourceDefinitionNames{ + Plural: claimPlural, + Singular: claimSingular, + Kind: claimKind, + ListKind: claimListKind, + Categories: []string{CategoryClaim}, + }, + Scope: extv1.NamespaceScoped, + Versions: []extv1.CustomResourceDefinitionVersion{ + { + Name: version, + Served: true, + Storage: true, + Subresources: &extv1.CustomResourceSubresources{ + Status: &extv1.CustomResourceSubresourceStatus{}, + }, + AdditionalPrinterColumns: []extv1.CustomResourceColumnDefinition{ + { + Name: "SYNCED", + Type: "string", + JSONPath: ".status.conditions[?(@.type=='Synced')].status", + }, + { + Name: "READY", + Type: "string", + JSONPath: ".status.conditions[?(@.type=='Ready')].status", + }, + { + Name: "CONNECTION-SECRET", + Type: "string", + JSONPath: ".spec.writeConnectionSecretToRef.name", + }, + { + Name: "AGE", + Type: "date", + JSONPath: ".metadata.creationTimestamp", + }, + }, + Schema: &extv1.CustomResourceValidation{ + OpenAPIV3Schema: &extv1.JSONSchemaProps{ + Type: "object", + Required: []string{"spec"}, + Description: "Description of the resource.", + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": { + Type: "string", + }, + "kind": { + Type: "string", + }, + "metadata": { + // NOTE(muvaf): api-server takes care of validating + // metadata. + Type: "object", + }, + "spec": { + Type: "object", + Required: []string{"storageGB", "engineVersion"}, + Description: "Specification of the resource.", + Properties: map[string]extv1.JSONSchemaProps{ + // From CRDSpecTemplate.Validation + "storageGB": {Type: "integer", Description: "Pretend this is useful."}, + "engineVersion": { + Type: "string", + Enum: []extv1.JSON{ + {Raw: []byte(`"5.6"`)}, + {Raw: []byte(`"5.7"`)}, + }, + }, + "compositeDeletePolicy": { + Type: "string", + Enum: []extv1.JSON{{Raw: []byte(`"Background"`)}, + {Raw: []byte(`"Foreground"`)}}, + }, + // From CompositeResourceClaimSpecProps() + "compositionRef": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + }, + }, + "compositionSelector": { + Type: "object", + Required: []string{"matchLabels"}, + Properties: map[string]extv1.JSONSchemaProps{ + "matchLabels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + }, + }, + "compositionRevisionRef": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + }, + }, + "compositionRevisionSelector": { + Type: "object", + Required: []string{"matchLabels"}, + Properties: map[string]extv1.JSONSchemaProps{ + "matchLabels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + }, + }, + "compositionUpdatePolicy": { + Type: "string", + Enum: []extv1.JSON{ + {Raw: []byte(`"Automatic"`)}, + {Raw: []byte(`"Manual"`)}, + }, + }, + "resourceRef": { + Type: "object", + Required: []string{"apiVersion", "kind", "name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": {Type: "string"}, + "kind": {Type: "string"}, + "name": {Type: "string"}, + }, + }, + "publishConnectionDetailsTo": { + Type: "object", + Required: 
[]string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + "configRef": { + Type: "object", + Default: &extv1.JSON{Raw: []byte(`{"name": "default"}`)}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": { + Type: "string", + }, + }, + }, + "metadata": { + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "labels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + "annotations": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + "type": { + Type: "string", + }, + }, + }, + }, + }, + "writeConnectionSecretToRef": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + }, + }, + }, + XValidations: extv1.ValidationRules{ + { + Message: "Cannot change engine version", + Rule: "self.engineVersion == oldSelf.engineVersion", + }, + }, + }, + "status": { + Type: "object", + Description: "Status of the resource.", + Properties: map[string]extv1.JSONSchemaProps{ + "phase": {Type: "string"}, + + // From CompositeResourceStatusProps() + "conditions": { + Description: "Conditions of the resource.", + Type: "array", + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "object", + Required: []string{"lastTransitionTime", "reason", "status", "type"}, + Properties: map[string]extv1.JSONSchemaProps{ + "lastTransitionTime": {Type: "string", Format: "date-time"}, + "message": {Type: "string"}, + "reason": {Type: "string"}, + "status": {Type: "string"}, + "type": {Type: "string"}, + }, + }, + }, + }, + "connectionDetails": { + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "lastPublishedTime": {Type: "string", Format: "date-time"}, + }, + }, + }, + XValidations: extv1.ValidationRules{ + { + Message: "Phase is required once set", + Rule: "!has(oldSelf.phase) || has(self.phase)", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + got, err := ForCompositeResourceClaim(d) + if err != nil { + t.Fatalf("ForCompositeResourceClaim(...): %s", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("ForCompositeResourceClaim(...): -want, +got:\n%s", diff) + } +} + +func TestForCompositeResourceClaimEmptyXrd(t *testing.T) { + name := "coolcomposites.example.org" + labels := map[string]string{"cool": "very"} + annotations := map[string]string{"example.org/cool": "very"} + + group := "example.org" + version := "v1" + + kind := "CoolComposite" + listKind := "CoolCompositeList" + singular := "coolcomposite" + plural := "coolcomposites" + + claimKind := "CoolClaim" + claimListKind := "CoolClaimList" + claimSingular := "coolclaim" + claimPlural := "coolclaims" + + schema := "{}" + + d := &v1.CompositeResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + Annotations: annotations, + UID: types.UID("you-you-eye-dee"), + }, + Spec: v1.CompositeResourceDefinitionSpec{ + Group: group, + Names: extv1.CustomResourceDefinitionNames{ + Plural: plural, + Singular: singular, + Kind: kind, + ListKind: listKind, + }, + ClaimNames: &extv1.CustomResourceDefinitionNames{ + Plural: claimPlural, + Singular: claimSingular, + Kind: claimKind, + ListKind: claimListKind, + }, + Versions: []v1.CompositeResourceDefinitionVersion{{ + Name: version, + Referenceable: true, + Served: true, + Schema: &v1.CompositeResourceValidation{ + 
OpenAPIV3Schema: runtime.RawExtension{Raw: []byte(schema)}, + }, + }}, + }, + } + + want := &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: claimPlural + "." + group, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ + meta.AsController(meta.TypedReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind)), + }, + }, + Spec: extv1.CustomResourceDefinitionSpec{ + Group: group, + Names: extv1.CustomResourceDefinitionNames{ + Plural: claimPlural, + Singular: claimSingular, + Kind: claimKind, + ListKind: claimListKind, + Categories: []string{CategoryClaim}, + }, + Scope: extv1.NamespaceScoped, + Versions: []extv1.CustomResourceDefinitionVersion{ + { + Name: version, + Served: true, + Storage: true, + Subresources: &extv1.CustomResourceSubresources{ + Status: &extv1.CustomResourceSubresourceStatus{}, + }, + AdditionalPrinterColumns: []extv1.CustomResourceColumnDefinition{ + { + Name: "SYNCED", + Type: "string", + JSONPath: ".status.conditions[?(@.type=='Synced')].status", + }, + { + Name: "READY", + Type: "string", + JSONPath: ".status.conditions[?(@.type=='Ready')].status", + }, + { + Name: "CONNECTION-SECRET", + Type: "string", + JSONPath: ".spec.writeConnectionSecretToRef.name", + }, + { + Name: "AGE", + Type: "date", + JSONPath: ".metadata.creationTimestamp", + }, + }, + Schema: &extv1.CustomResourceValidation{ + OpenAPIV3Schema: &extv1.JSONSchemaProps{ + Type: "object", + Required: []string{"spec"}, + Description: "", + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": { + Type: "string", + }, + "kind": { + Type: "string", + }, + "metadata": { + // NOTE(muvaf): api-server takes care of validating + // metadata. + Type: "object", + }, + "spec": { + Type: "object", + Description: "", + Properties: map[string]extv1.JSONSchemaProps{ + "compositeDeletePolicy": { + Type: "string", + Enum: []extv1.JSON{{Raw: []byte(`"Background"`)}, {Raw: []byte(`"Foreground"`)}}, }, // From CompositeResourceClaimSpecProps() @@ -771,10 +1402,9 @@ func TestForCompositeResourceClaim(t *testing.T) { }, }, "status": { - Type: "object", + Type: "object", + Description: "", Properties: map[string]extv1.JSONSchemaProps{ - "phase": {Type: "string"}, - // From CompositeResourceStatusProps() "conditions": { Description: "Conditions of the resource.", @@ -818,3 +1448,189 @@ func TestForCompositeResourceClaim(t *testing.T) { t.Errorf("ForCompositeResourceClaim(...): -want, +got:\n%s", diff) } } + +func TestSetCrdMetadata(t *testing.T) { + type args struct { + crd *extv1.CustomResourceDefinition + xrd *v1.CompositeResourceDefinition + } + tests := []struct { + name string + args args + want *extv1.CustomResourceDefinition + }{ + { + name: "set crd annotations", + args: args{ + crd: &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + xrd: &v1.CompositeResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + "example.com/some-xrd-annotation": "not-propagated", + }, + }, + Spec: v1.CompositeResourceDefinitionSpec{Metadata: &v1.CompositeResourceDefinitionSpecMetadata{ + Annotations: map[string]string{ + "cert-manager.io/inject-ca-from": "example1-ns/webhook1-certificate", + }, + }}, + }, + }, + want: &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + "cert-manager.io/inject-ca-from": "example1-ns/webhook1-certificate", + }, + }, + }, + }, + { + name: "set crd labels", + args: args{ + crd: &extv1.CustomResourceDefinition{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + xrd: &v1.CompositeResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: v1.CompositeResourceDefinitionSpec{Metadata: &v1.CompositeResourceDefinitionSpecMetadata{ + Labels: map[string]string{ + "example.com/some-crd-label": "value1", + "example.com/some-additional-crd-label": "value2", + }, + }}, + }, + }, + want: &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Labels: map[string]string{ + "example.com/some-crd-label": "value1", + "example.com/some-additional-crd-label": "value2", + }, + }, + }, + }, + { + name: "append labels", + args: args{ + crd: &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + xrd: &v1.CompositeResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Labels: map[string]string{ + "example.com/some-xrd-label": "value1", + "example.com/some-additional-xrd-label": "value2", + }, + }, + Spec: v1.CompositeResourceDefinitionSpec{Metadata: &v1.CompositeResourceDefinitionSpecMetadata{ + Labels: map[string]string{ + "example.com/some-crd-label": "value3", + "example.com/some-additional-crd-label": "value4", + }, + }}, + }, + }, + want: &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Labels: map[string]string{ + "example.com/some-xrd-label": "value1", + "example.com/some-additional-xrd-label": "value2", + "example.com/some-crd-label": "value3", + "example.com/some-additional-crd-label": "value4", + }, + }, + }, + }, + { + name: "labels and annotations", + args: args{ + crd: &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + xrd: &v1.CompositeResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + "example.com/some-xrd-annotation": "not-propagated", + "example.com/some-additional-xrd-label-annotation": "not-propagated", + }, + Labels: map[string]string{ + "example.com/some-xrd-label": "value1", + "example.com/some-additional-xrd-label": "value2", + }, + }, + Spec: v1.CompositeResourceDefinitionSpec{Metadata: &v1.CompositeResourceDefinitionSpecMetadata{ + Annotations: map[string]string{ + "example.com/some-crd-annotation": "value1", + "example.com/some-additional-crd-label-annotation": "value2", + }, + Labels: map[string]string{ + "example.com/some-crd-label": "value3", + "example.com/some-additional-crd-label": "value4", + }, + }}, + }, + }, + want: &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + "example.com/some-crd-annotation": "value1", + "example.com/some-additional-crd-label-annotation": "value2", + }, + Labels: map[string]string{ + "example.com/some-xrd-label": "value1", + "example.com/some-additional-xrd-label": "value2", + "example.com/some-crd-label": "value3", + "example.com/some-additional-crd-label": "value4", + }, + }, + }, + }, + { + name: "no labels and no annotations", + args: args{ + crd: &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + xrd: &v1.CompositeResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + }, + want: &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := setCrdMetadata(tt.args.crd, tt.args.xrd) + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("setCrdMetadata(...): -want, 
+got:\n%s", diff) + } + }) + } +} diff --git a/internal/xcrd/schemas.go b/internal/xcrd/schemas.go index 09ac176b2..29cd074ba 100644 --- a/internal/xcrd/schemas.go +++ b/internal/xcrd/schemas.go @@ -25,6 +25,9 @@ const ( LabelKeyClaimNamespace = "crossplane.io/claim-namespace" ) +// CompositionRevisionRef should be propagated dynamically +var CompositionRevisionRef = "compositionRevisionRef" + // PropagateSpecProps is the list of XRC spec properties to propagate // when translating an XRC into an XR and vice-versa. var PropagateSpecProps = []string{"compositionRef", "compositionSelector", "compositionUpdatePolicy", "compositionRevisionSelector"} diff --git a/internal/xfn/container.go b/internal/xfn/container.go index 461a991ce..b6351ec58 100644 --- a/internal/xfn/container.go +++ b/internal/xfn/container.go @@ -43,10 +43,11 @@ type ContainerRunner struct { log logging.Logger - rootUID int - rootGID int - setuid bool // Specifically, CAP_SETUID and CAP_SETGID. - cache string + rootUID int + rootGID int + setuid bool // Specifically, CAP_SETUID and CAP_SETGID. + cache string + registry string } // A ContainerRunnerOption configures a new ContainerRunner. @@ -78,6 +79,14 @@ func WithCacheDir(d string) ContainerRunnerOption { } } +// WithRegistry specifies the default registry used to retrieve function images and +// containers. +func WithRegistry(dr string) ContainerRunnerOption { + return func(r *ContainerRunner) { + r.registry = dr + } +} + // WithLogger configures which logger the container runner should use. Logging // is disabled by default. func WithLogger(l logging.Logger) ContainerRunnerOption { diff --git a/internal/xfn/container_linux.go b/internal/xfn/container_linux.go index 675198830..2b083dbd2 100644 --- a/internal/xfn/container_linux.go +++ b/internal/xfn/container_linux.go @@ -95,10 +95,11 @@ func (r *ContainerRunner) RunFunction(ctx context.Context, req *v1alpha1.RunFunc Therefore we execute a shim - xfn spark - in a new user and mount namespace. spark fetches and caches the image, creates an OCI runtime - bundle, then then executes an OCI runtime in order to actually execute + bundle, then executes an OCI runtime in order to actually execute the function. */ - cmd := exec.CommandContext(ctx, os.Args[0], spark, "--cache-dir="+r.cache, fmt.Sprintf("--max-stdio-bytes=%d", MaxStdioBytes)) //nolint:gosec // We're intentionally executing with variable input. + cmd := exec.CommandContext(ctx, os.Args[0], spark, "--cache-dir="+r.cache, "--registry="+r.registry, //nolint:gosec // We're intentionally executing with variable input. + fmt.Sprintf("--max-stdio-bytes=%d", MaxStdioBytes)) cmd.SysProcAttr = &syscall.SysProcAttr{ Cloneflags: syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS, UidMappings: []syscall.SysProcIDMap{{ContainerID: 0, HostID: r.rootUID, Size: 1}}, diff --git a/internal/xpkg/fetch.go b/internal/xpkg/fetch.go index c29135f1e..5b10f3321 100644 --- a/internal/xpkg/fetch.go +++ b/internal/xpkg/fetch.go @@ -22,8 +22,6 @@ import ( "crypto/x509" "io" "net/http" - "os" - "path/filepath" "github.com/google/go-containerregistry/pkg/authn/k8schain" "github.com/google/go-containerregistry/pkg/name" @@ -64,29 +62,6 @@ type K8sFetcher struct { // FetcherOpt can be used to add optional parameters to NewK8sFetcher type FetcherOpt func(k *K8sFetcher) error -// ParseCertificatesFromPath parses PEM file containing extra x509 -// certificates(s) and combines them with the built in root CA CertPool. 
-func ParseCertificatesFromPath(path string) (*x509.CertPool, error) { - // Get the SystemCertPool, continue with an empty pool on error - rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } - - // Read in the cert file - certs, err := os.ReadFile(filepath.Clean(path)) - if err != nil { - return nil, errors.Wrapf(err, "Failed to append %q to RootCAs", path) - } - - // Append our cert to the system pool - if ok := rootCAs.AppendCertsFromPEM(certs); !ok { - return nil, errors.Errorf("No certificates could be parsed from %q", path) - } - - return rootCAs, nil -} - // WithCustomCA is a FetcherOpt that can be used to add a custom CA bundle to a K8sFetcher. func WithCustomCA(rootCAs *x509.CertPool) FetcherOpt { return func(k *K8sFetcher) error { diff --git a/internal/xpkg/lint.go b/internal/xpkg/lint.go index 158bcf0b3..80f30c43b 100644 --- a/internal/xpkg/lint.go +++ b/internal/xpkg/lint.go @@ -28,6 +28,7 @@ import ( v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" pkgmetav1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" + pkgmetav1alpha1 "github.com/crossplane/crossplane/apis/pkg/meta/v1alpha1" "github.com/crossplane/crossplane/internal/version" ) @@ -36,13 +37,14 @@ const ( errNotMeta = "meta type is not a package" errNotMetaProvider = "package meta type is not Provider" errNotMetaConfiguration = "package meta type is not Configuration" + errNotMetaFunction = "package meta type is not Function" errNotCRD = "object is not a CRD" errNotXRD = "object is not an XRD" errNotMutatingWebhookConfiguration = "object is not a MutatingWebhookConfiguration" errNotValidatingWebhookConfiguration = "object is not an ValidatingWebhookConfiguration" errNotComposition = "object is not a Composition" errBadConstraints = "package version constraints are poorly formatted" - errCrossplaneIncompatibleFmt = "package is not compatible with Crossplane version (%s)" + errFmtCrossplaneIncompatible = "package is not compatible with Crossplane version (%s)" ) // NewProviderLinter is a convenience function for creating a package linter for @@ -62,6 +64,12 @@ func NewConfigurationLinter() parser.Linter { return parser.NewPackageLinter(parser.PackageLinterFns(OneMeta), parser.ObjectLinterFns(IsConfiguration, PackageValidSemver), parser.ObjectLinterFns(parser.Or(IsXRD, IsComposition))) } +// NewFunctionLinter is a convenience function for creating a package linter for +// functions. +func NewFunctionLinter() parser.Linter { + return parser.NewPackageLinter(parser.PackageLinterFns(OneMeta), parser.ObjectLinterFns(IsFunction, PackageValidSemver), parser.ObjectLinterFns()) +} + // OneMeta checks that there is only one meta object in the package. func OneMeta(pkg *parser.Package) error { if len(pkg.GetMeta()) != 1 { @@ -88,6 +96,14 @@ func IsConfiguration(o runtime.Object) error { return nil } +// IsFunction checks that an object is a Function meta type. +func IsFunction(o runtime.Object) error { + if _, ok := o.(*pkgmetav1alpha1.Function); !ok { + return errors.New(errNotMetaFunction) + } + return nil +} + // PackageCrossplaneCompatible checks that the current Crossplane version is // compatible with the package constraints. 
func PackageCrossplaneCompatible(v version.Operations) parser.ObjectLinterFn { @@ -102,10 +118,10 @@ func PackageCrossplaneCompatible(v version.Operations) parser.ObjectLinterFn { } in, err := v.InConstraints(p.GetCrossplaneConstraints().Version) if err != nil { - return errors.Wrapf(err, errCrossplaneIncompatibleFmt, v.GetVersionString()) + return errors.Wrapf(err, errFmtCrossplaneIncompatible, v.GetVersionString()) } if !in { - return errors.Errorf(errCrossplaneIncompatibleFmt, v.GetVersionString()) + return errors.Errorf(errFmtCrossplaneIncompatible, v.GetVersionString()) } return nil } diff --git a/internal/xpkg/lint_test.go b/internal/xpkg/lint_test.go index bed163a4d..a25741b1f 100644 --- a/internal/xpkg/lint_test.go +++ b/internal/xpkg/lint_test.go @@ -60,6 +60,11 @@ kind: Configuration metadata: name: test`) + v1alpha1FuncBytes = []byte(`apiVersion: meta.pkg.crossplane.io/v1 + kind: Function + metadata: + name: test`) + v1ProvBytes = []byte(`apiVersion: meta.pkg.crossplane.io/v1 kind: Provider metadata: @@ -88,6 +93,8 @@ metadata: _ = yaml.Unmarshal(v1alpha1ProvBytes, v1alpha1ProvMeta) v1alpha1ConfMeta = &pkgmetav1alpha1.Configuration{} _ = yaml.Unmarshal(v1alpha1ConfBytes, v1alpha1ConfMeta) + v1alpha1FuncMeta = &pkgmetav1alpha1.Function{} + _ = yaml.Unmarshal(v1alpha1FuncBytes, v1alpha1FuncMeta) v1ProvMeta = &pkgmetav1.Provider{} _ = yaml.Unmarshal(v1ProvBytes, v1ProvMeta) v1ConfMeta = &pkgmetav1.Configuration{} @@ -206,6 +213,34 @@ func TestIsConfiguration(t *testing.T) { } } +func TestIsFunction(t *testing.T) { + cases := map[string]struct { + reason string + obj runtime.Object + err error + }{ + "v1alpha1": { + reason: "Should not return error if object is a v1alpha1 function.", + obj: v1alpha1FuncMeta, + }, + "ErrNotFunction": { + reason: "Should return error if object is not function.", + obj: v1beta1crd, + err: errors.New(errNotMetaFunction), + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + err := IsFunction(tc.obj) + + if diff := cmp.Diff(tc.err, err, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nIsFunction(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} + func TestPackageCrossplaneCompatible(t *testing.T) { crossplaneConstraint := ">v0.13.0" errBoom := errors.New("boom") @@ -259,7 +294,7 @@ func TestPackageCrossplaneCompatible(t *testing.T) { MockGetVersionString: fake.NewMockGetVersionStringFn("v0.12.0"), }, }, - err: errors.Wrapf(errBoom, errCrossplaneIncompatibleFmt, "v0.12.0"), + err: errors.Wrapf(errBoom, errFmtCrossplaneIncompatible, "v0.12.0"), }, "ErrOutsideConstraints": { reason: "Should return error if Crossplane version outside constraints.", @@ -278,7 +313,7 @@ func TestPackageCrossplaneCompatible(t *testing.T) { MockGetVersionString: fake.NewMockGetVersionStringFn("v0.12.0"), }, }, - err: errors.Errorf(errCrossplaneIncompatibleFmt, "v0.12.0"), + err: errors.Errorf(errFmtCrossplaneIncompatible, "v0.12.0"), }, "ErrNotMeta": { reason: "Should return error if object is not a meta package type.", diff --git a/pkg/validation/apiextensions/v1/composition/patches.go b/pkg/validation/apiextensions/v1/composition/patches.go index 4438a5087..c85965110 100644 --- a/pkg/validation/apiextensions/v1/composition/patches.go +++ b/pkg/validation/apiextensions/v1/composition/patches.go @@ -399,7 +399,7 @@ func validateFieldPathSegment(parent *apiextensions.JSONSchemaProps, segment fie return nil, nil } -func validateFieldPathSegmentField(parent *apiextensions.JSONSchemaProps, segment fieldpath.Segment) 
(*apiextensions.JSONSchemaProps, error) { +func validateFieldPathSegmentField(parent *apiextensions.JSONSchemaProps, segment fieldpath.Segment) (*apiextensions.JSONSchemaProps, error) { //nolint:gocyclo // inherently complex if parent == nil { return nil, nil } @@ -420,7 +420,11 @@ func validateFieldPathSegmentField(parent *apiextensions.JSONSchemaProps, segmen // Schema is not nil. // See https://github.com/kubernetes/kubernetes/blob/ff4eff24ac4fad5431aa89681717d6c4fe5733a4/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go#L828 if parent.AdditionalProperties != nil && (parent.AdditionalProperties.Allows || parent.AdditionalProperties.Schema != nil) { - return parent.AdditionalProperties.Schema, nil + if parent.AdditionalProperties.Schema != nil && parent.AdditionalProperties.Schema.Type != "" { + return parent.AdditionalProperties.Schema, nil + } + // re-evaluate the segment against the additional properties schema + return validateFieldPathSegmentField(parent.AdditionalProperties.Schema, segment) } return nil, errors.Errorf(errFmtFieldInvalid, segment.Field) diff --git a/pkg/validation/apiextensions/v1/composition/patches_test.go b/pkg/validation/apiextensions/v1/composition/patches_test.go index e57ab9cd8..114c5f4dc 100644 --- a/pkg/validation/apiextensions/v1/composition/patches_test.go +++ b/pkg/validation/apiextensions/v1/composition/patches_test.go @@ -444,6 +444,29 @@ func TestValidateFieldPath(t *testing.T) { schema: &apiextensions.JSONSchemaProps{Properties: map[string]apiextensions.JSONSchemaProps{"metadata": {Type: "object"}}}, }, }, + "AcceptXPreserveUnknownFieldsInAdditionalProperties": { + reason: "Should properly handle x-preserve-unknown-fields even if defined in a nested schema", + want: want{err: nil, fieldType: ""}, + args: args{ + fieldPath: "data.someField", + schema: &apiextensions.JSONSchemaProps{ + Properties: map[string]apiextensions.JSONSchemaProps{ + "data": { + Type: "object", + AdditionalProperties: &apiextensions.JSONSchemaPropsOrBool{ + Schema: &apiextensions.JSONSchemaProps{ + XPreserveUnknownFields: &[]bool{true}[0], + }, + }, + }}}}, + }, + "AcceptAnnotations": { + want: want{err: nil, fieldType: "string"}, + args: args{ + fieldPath: "metadata.annotations[cooler-field]", + schema: getDefaultSchema(), + }, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { diff --git a/security/ADA-security-audit-23.pdf b/security/ADA-security-audit-23.pdf new file mode 100644 index 000000000..b9e0f1c38 Binary files /dev/null and b/security/ADA-security-audit-23.pdf differ diff --git a/test/e2e/README.md b/test/e2e/README.md index fa2712fb0..7d9536246 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -38,7 +38,7 @@ E2E_TEST_FLAGS="-test.run ^TestConfiguration" make e2e E2E_TEST_FLAGS="-labels area=apiextensions" make e2e # To test a specific feature, use the feature flag -E2E_TEST_FLAGS="-feature=Install" make e2e +E2E_TEST_FLAGS="-feature=ConfigurationWithDependency" make e2e # Stop immediately on first test failure, and leave the kind cluster to debug. E2E_TEST_FLAGS="-test.v -test.failfast -destroy-kind-cluster=false" @@ -46,6 +46,25 @@ E2E_TEST_FLAGS="-test.v -test.failfast -destroy-kind-cluster=false" # Use an existing Kubernetes cluster. Note that the E2E tests can't deploy your # local build of Crossplane in this scenario, so you'll have to do it yourself. 
E2E_TEST_FLAGS="-create-kind-cluster=false -destroy-kind-cluster=false -kubeconfig=$HOME/.kube/config" + +# Run the CrossplaneUpgrade feature, against an existing kind cluster named +# "kind" (or creating it if it doesn't exist), # without installing Crossplane +# first, as the feature expects the cluster to be empty, but still loading the +# images to # it. Setting the tests to fail fast and not destroying the cluster +# afterward in order to allow debugging it. +E2E_TEST_FLAGS="-test.v -v 4 -test.failfast \ + -destroy-kind-cluster=false \ + -kind-cluster-name=kind \ + -install-crossplane=false \ + -feature=CrossplaneUpgrade" make e2e + +# Run the all tests not installing or upgrading Crossplane against the currently +# selected cluster where Crossplane has already been installed. +E2E_TEST_FLAGS="-test.v -v 4 -test.failfast \ + -kubeconfig=$HOME/.kube/config \ + -skip-labels modify-crossplane-installation=true \ + -create-kind-cluster=false \ + -install-crossplane=false" make go.build e2e-run-tests ``` ## Test Parallelism @@ -66,10 +85,94 @@ uses a matrix strategy to invoke each area as its own job, running in parallel. > We're still learning what the best way to arrange E2E tests is. It's okay for > this pattern to change if it's not working well, but please discuss first! -Each feature under test consists of: +We try to follow this pattern when adding a new test: + +1. Define a single feature per Test function, if possible. +1. Setup a directory of plain YAML manifests per test - i.e. test fixtures - at + `e2e/manifests//`, usually with a `setup` sub-folder + containing resources to be deployed at setup phase and cleaned up during the + teardown. Try to avoid reusing other feature's fixtures, as this would introduce + hidden dependencies between tests. +1. Try reusing existing helpers as much as possible, see package + `github.com/crossplane/crossplane/test/e2e/funcs`, or add new ones there if + needed. +1. Prefer using the Fluent APIs to define features + (`features.New(...).WithSetup(...).Assess(...).WithTeardown(...).Feature()`). + 1. `features.Table` should be used only to define multiple self-contained + assessments to be run sequentially, but without assuming any ordering among + them, similarly to the usual table driven style we adopt for unit testing. +1. Prefer the usage of `WithSetup` and `WithTeardown` to their unnamed + counterparts (`Setup` and `Teardown`) to define the setup and teardown phases of + a feature, as they allow to provide a description. +1. Use short but explicative `CamelCase` sentences as descriptions for + everything used to define the name of tests/subtests, e.g. + `features.New("CrossplaneUpgrade", ...)` `WithSetup("InstallProviderNop", + ...)`, `Assess("ProviderNopIsInstalled", ...)`, + `WithTeardown("UninstallProviderNop", ...)`. +1. Use the `Setup` and `Teardown` phases to define respectively actions that are + not strictly part of the feature being tested, but are needed to make it + work, and actions that are needed to clean up the environment after the test + has run. +1. Use `Assess` steps to define the steps required to exercise the actual + feature at hand. +1. Use `Assess` steps to define both conditions that should hold and actions that + should be performed. In the former case use active descriptions, e.g. + `InstallProviderNop`, while in the latter use passive descriptions, e.g. + `ProviderNopIsInstalled`. +1. 
+1. Try to group an action and the checks verifying it into a single `Assess`
+   step with an active description, if possible, to avoid doubling the number
+   of steps and to make it explicit that we are checking the outcome of the
+   previous action. e.g. `"UpgradeProvider"` should both upgrade the provider
+   and check that it becomes healthy within a reasonable time.
+1. Avoid using the available context to pass data between steps, as it makes it
+   harder to understand the flow of the test and could lead to data races if not
+   handled properly.
+1. Keep in mind that all `Setup` and `Teardown` steps, wherever they are defined,
+   are always executed respectively before and after all the `Assess` steps
+   defined, so you can define `Teardowns` immediately after the step that created the
+   resource to be deleted, as a sort of `defer` statement. The same applies to `Setup`
+   steps, which could be located immediately before the step requiring
+   them. But be careful with this, as a non-linear narrative is going to be harder
+   to follow, so if possible stick to all Setups at the beginning and all Teardowns
+   at the end of the feature.
+1. Features can be assigned labels, to allow slicing and dicing the test suite,
+   see below for more details about available labels, but overall try to define
+   all the labels that could be useful to select the test in the future and make
+   sure it's actually being selected when run in CI.
+
+Here is an example of a test following the above guidelines:
+
+```go
+package e2e
+
+// ...
+
+// TestSomeFeature ...
+func TestSomeFeature(t *testing.T) {
+	manifests := "test/e2e/manifests/pkg/some-area/some-feature"
+	namespace := "some-namespace"
+	// ... other variables or constants ...
+
+	environment.Test(t,
+		features.New("ConfigurationWithDependency").
+			WithLabel(LabelArea, ...).
+			WithLabel(LabelSize, ...).
+			// ...
+			WithSetup("ReadyPrerequisites", ... ).
+			// ... other setup steps ...
+			Assess("DoSomething", ... ).
+			Assess("SomethingElseIsInSomeState", ... ).
+			// ... other assess steps ...
+			WithTeardown("DeleteCreatedResources", ...).
+			// ... other teardown steps ...
+			Feature(),
+	)
+}
+
+// ...
+```

-1. A directory of manifests - i.e. test fixtures.
-1. A `features.Table` of assessment steps.
+### Features' Labels

 Features are grouped into broad feature areas - e.g. `TestComposition` in
 `composition_test.go`. Features pertaining to Composition should be added to
@@ -122,4 +225,4 @@ Refer to the [E2E one-pager] for more context.
 [CI GitHub workflow]: ../../.github/workflows/ci.yml
 [`testing`]: https://pkg.go.dev/testing
 [`e2e-framework`]: https://pkg.go.dev/sigs.k8s.io/e2e-framework
-[E2e one-pager]: ../../design/one-pager-e2e-tests.md
\ No newline at end of file
+[E2e one-pager]: ../../design/one-pager-e2e-tests.md
diff --git a/test/e2e/apiextensions_test.go b/test/e2e/apiextensions_test.go
index 833fe6cd0..d6da6a5d5 100644
--- a/test/e2e/apiextensions_test.go
+++ b/test/e2e/apiextensions_test.go
@@ -34,213 +34,125 @@ import (
 // extensions (i.e. Composition, XRDs, etc).
 const LabelAreaAPIExtensions = "apiextensions"
 
-// TestComposition tests Crossplane's Composition functionality.
-func TestComposition(t *testing.T) {
-	// Test that a claim using a very minimal Composition (with no patches,
-	// transforms, or functions) will become available when its composed
-	// resources do.
+// TestCompositionMinimal tests Crossplane's Composition functionality, +// checking that a claim using a very minimal Composition (with no patches, +// transforms, or functions) will become available when its composed +// resources do. +func TestCompositionMinimal(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/minimal" - minimal := features.Table{ - { - Name: "PrerequisitesAreCreated", - Assessment: funcs.AllOf( - funcs.ApplyResources(FieldManager, manifests, "prerequisites/*.yaml"), - funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/*.yaml"), - ), - }, - { - Name: "XRDBecomesEstablished", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/definition.yaml", apiextensionsv1.WatchingComposite()), - }, - { - Name: "ClaimIsCreated", - Assessment: funcs.AllOf( - funcs.ApplyResources(FieldManager, manifests, "claim.yaml"), - funcs.ResourcesCreatedWithin(30*time.Second, manifests, "claim.yaml"), - ), - }, - { - Name: "ClaimBecomesAvailable", - Assessment: funcs.ResourcesHaveConditionWithin(5*time.Minute, manifests, "claim.yaml", xpv1.Available()), - }, - { - Name: "ClaimIsDeleted", - Assessment: funcs.AllOf( - funcs.DeleteResources(manifests, "claim.yaml"), - funcs.ResourcesDeletedWithin(2*time.Minute, manifests, "claim.yaml"), - ), - }, - { - Name: "PrerequisitesAreDeleted", - Assessment: funcs.AllOf( - funcs.DeleteResources(manifests, "prerequisites/*.yaml"), - funcs.ResourcesDeletedWithin(3*time.Minute, manifests, "prerequisites/*.yaml"), - ), - }, - } - // Test that a claim using patch-and-transform Composition will become - // available when its composed resources do, and have a field derived from - // the patch. - manifests = "test/e2e/manifests/apiextensions/composition/patch-and-transform" - pandt := features.Table{ - { - Name: "PrerequisitesAreCreated", - Assessment: funcs.AllOf( - funcs.ApplyResources(FieldManager, manifests, "prerequisites/*.yaml"), - funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/*.yaml"), - ), - }, - { - Name: "XRDBecomesEstablished", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/definition.yaml", apiextensionsv1.WatchingComposite()), - }, - { - Name: "ClaimIsCreated", - Assessment: funcs.AllOf( + environment.Test(t, + features.New("CompositionMinimal"). + WithLabel(LabelArea, LabelAreaAPIExtensions). + WithLabel(LabelSize, LabelSizeSmall). + WithSetup("PrerequisitesAreCreated", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "setup/*.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "setup/*.yaml"), + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "setup/definition.yaml", apiextensionsv1.WatchingComposite()), + )). + Assess("CreateClaim", funcs.AllOf( funcs.ApplyResources(FieldManager, manifests, "claim.yaml"), funcs.ResourcesCreatedWithin(30*time.Second, manifests, "claim.yaml"), - ), - }, - { - Name: "ClaimBecomesAvailable", - Assessment: funcs.ResourcesHaveConditionWithin(5*time.Minute, manifests, "claim.yaml", xpv1.Available()), - }, - { - Name: "ClaimHasPatchedField", - Assessment: funcs.ResourcesHaveFieldValueWithin(5*time.Minute, manifests, "claim.yaml", "status.coolerField", "I'M COOL!"), - }, - { - Name: "ClaimIsDeleted", - Assessment: funcs.AllOf( + funcs.ResourcesHaveConditionWithin(5*time.Minute, manifests, "claim.yaml", xpv1.Available()), + )). 
+ WithTeardown("DeleteClaim", funcs.AllOf( funcs.DeleteResources(manifests, "claim.yaml"), funcs.ResourcesDeletedWithin(2*time.Minute, manifests, "claim.yaml"), - ), - }, - { - Name: "PrerequisitesAreDeleted", - Assessment: funcs.AllOf( - funcs.DeleteResources(manifests, "prerequisites/*.yaml"), - funcs.ResourcesDeletedWithin(3*time.Minute, manifests, "prerequisites/*.yaml"), - ), - }, - } + )). + WithTeardown("DeletePrerequisites", funcs.AllOf( + funcs.DeleteResources(manifests, "setup/*.yaml"), + funcs.ResourcesDeletedWithin(3*time.Minute, manifests, "setup/*.yaml"), + )). + Feature(), + ) +} - setup := funcs.ReadyToTestWithin(1*time.Minute, namespace) +// TestCompositionPatchAndTransform tests Crossplane's Composition functionality, +// checking that a claim using patch-and-transform Composition will become +// available when its composed resources do, and have a field derived from +// the patch. +func TestCompositionPatchAndTransform(t *testing.T) { + + manifests := "test/e2e/manifests/apiextensions/composition/patch-and-transform" environment.Test(t, - minimal.Build("Minimal"). + features.New("CompositionPatchAndTransform"). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). - Setup(setup).Feature(), - pandt.Build("PatchAndTransform"). - WithLabel(LabelArea, LabelAreaAPIExtensions). - WithLabel(LabelSize, LabelSizeSmall). - Setup(setup).Feature(), + WithSetup("CreatePrerequisites", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "setup/*.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "setup/*.yaml"), + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "setup/definition.yaml", apiextensionsv1.WatchingComposite()), + )). + Assess("CreateClaim", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "claim.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "claim.yaml"), + )). + Assess("ClaimIsReady", + funcs.ResourcesHaveConditionWithin(5*time.Minute, manifests, "claim.yaml", xpv1.Available())). + Assess("ClaimHasPatchedField", + funcs.ResourcesHaveFieldValueWithin(5*time.Minute, manifests, "claim.yaml", "status.coolerField", "I'M COOL!"), + ). + WithTeardown("DeleteClaim", funcs.AllOf( + funcs.DeleteResources(manifests, "claim.yaml"), + funcs.ResourcesDeletedWithin(2*time.Minute, manifests, "claim.yaml"), + )). + WithTeardown("DeletePrerequisites", funcs.AllOf( + funcs.DeleteResources(manifests, "setup/*.yaml"), + funcs.ResourcesDeletedWithin(3*time.Minute, manifests, "setup/*.yaml"), + )). + Feature(), ) -} -func TestValidation(t *testing.T) { +} - // A valid Composition should be created when validated in strict mode. 
- manifests := "test/e2e/manifests/apiextensions/validation/composition-schema-valid" - valid := features.Table{ - { - Name: "PrerequisitesAreCreated", - Assessment: funcs.AllOf( - funcs.ApplyResources(FieldManager, manifests, "prerequisites/*.yaml"), - funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/*.yaml"), - ), - }, - { - Name: "XRDBecomesEstablished", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/definition.yaml", apiextensionsv1.WatchingComposite()), - }, - { - Name: "ProviderIsHealthy", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/provider.yaml", pkgv1.Healthy(), pkgv1.Active()), - }, - { - Name: "CompositionIsCreated", - Assessment: funcs.AllOf( - funcs.ApplyResources(FieldManager, manifests, "composition.yaml"), - funcs.ResourcesCreatedWithin(30*time.Second, manifests, "composition.yaml"), - ), - }, - { - Name: "CompositionIsDeleted", - Assessment: funcs.AllOf( - funcs.DeleteResources(manifests, "composition.yaml"), - funcs.ResourcesDeletedWithin(30*time.Second, manifests, "composition.yaml"), - ), - }, - { - Name: "PrerequisitesAreDeleted", - Assessment: funcs.AllOf( - funcs.DeleteResources(manifests, "prerequisites/*.yaml"), - funcs.ResourcesDeletedWithin(3*time.Minute, manifests, "prerequisites/*.yaml"), - ), - }, - } +func TestCompositionValidation(t *testing.T) { + manifests := "test/e2e/manifests/apiextensions/composition/validation" - // An invalid Composition should be rejected when validated in strict mode. - manifests = "test/e2e/manifests/apiextensions/validation/composition-schema-invalid" - invalid := features.Table{ - { - Name: "PrerequisitesAreCreated", - Assessment: funcs.AllOf( - funcs.ApplyResources(FieldManager, manifests, "prerequisites/*.yaml"), - funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/*.yaml"), - ), - }, - { - Name: "XRDBecomesEstablished", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/definition.yaml", apiextensionsv1.WatchingComposite()), - }, + cases := features.Table{ { - Name: "ProviderIsHealthy", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/provider.yaml", pkgv1.Healthy(), pkgv1.Active()), - }, - { - Name: "CompositionIsCreated", + // A valid Composition should be created when validated in strict mode. + Name: "ValidCompositionIsAccepted", Assessment: funcs.AllOf( - funcs.ResourcesFailToApply(FieldManager, manifests, "composition.yaml"), + funcs.ApplyResources(FieldManager, manifests, "composition-valid.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "composition-valid.yaml"), ), }, { - Name: "PrerequisitesAreDeleted", - Assessment: funcs.AllOf( - funcs.DeleteResources(manifests, "prerequisites/*.yaml"), - funcs.ResourcesDeletedWithin(3*time.Minute, manifests, "prerequisites/*.yaml"), - ), + // An invalid Composition should be rejected when validated in strict mode. + Name: "InvalidCompositionIsRejected", + Assessment: funcs.ResourcesFailToApply(FieldManager, manifests, "composition-invalid.yaml"), }, } - - // Enable our feature flag. - setup := funcs.AllOf( - funcs.AsFeaturesFunc(funcs.HelmUpgrade(HelmOptions(helm.WithArgs("--set args={--debug,--enable-composition-webhook-schema-validation}"))...)), - funcs.ReadyToTestWithin(1*time.Minute, namespace), - ) - - // Disable our feature flag. 
- teardown := funcs.AllOf( - funcs.AsFeaturesFunc(funcs.HelmUpgrade(HelmOptions()...)), - funcs.ReadyToTestWithin(1*time.Minute, namespace), - ) - environment.Test(t, - valid.Build("ValidComposition"). - WithLabel(LabelStage, LabelStageAlpha). - WithLabel(LabelArea, LabelAreaAPIExtensions). - WithLabel(LabelSize, LabelSizeSmall). - Setup(setup). - Teardown(teardown). - Feature(), - invalid.Build("InvalidComposition"). + cases.Build("CompositionValidation"). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). - Setup(setup). - Teardown(teardown). + WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). + // Enable our feature flag. + WithSetup("EnableAlphaCompositionValidation", funcs.AllOf( + funcs.AsFeaturesFunc(funcs.HelmUpgrade(HelmOptions(helm.WithArgs("--set args={--debug,--enable-composition-webhook-schema-validation}"))...)), + funcs.ReadyToTestWithin(1*time.Minute, namespace), + )). + WithSetup("CreatePrerequisites", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "setup/*.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "setup/*.yaml"), + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "setup/definition.yaml", apiextensionsv1.WatchingComposite()), + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "setup/provider.yaml", pkgv1.Healthy(), pkgv1.Active()), + )). + WithTeardown("DeleteValidComposition", funcs.AllOf( + funcs.DeleteResources(manifests, "*-valid.yaml"), + funcs.ResourcesDeletedWithin(30*time.Second, manifests, "*-valid.yaml"), + )). + WithTeardown("DeletePrerequisites", funcs.AllOf( + funcs.DeleteResources(manifests, "setup/*.yaml"), + funcs.ResourcesDeletedWithin(3*time.Minute, manifests, "setup/*.yaml"), + )). + // Disable our feature flag. + WithTeardown("DisableAlphaCompositionValidation", funcs.AllOf( + funcs.AsFeaturesFunc(funcs.HelmUpgrade(HelmOptions()...)), + funcs.ReadyToTestWithin(1*time.Minute, namespace), + )). Feature(), ) } diff --git a/test/e2e/funcs/env.go b/test/e2e/funcs/env.go index 78330a5a9..46bc7f987 100644 --- a/test/e2e/funcs/env.go +++ b/test/e2e/funcs/env.go @@ -18,12 +18,19 @@ package funcs import ( "context" + "fmt" + "os" + "path/filepath" "testing" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/e2e-framework/pkg/env" "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" "sigs.k8s.io/e2e-framework/pkg/features" "sigs.k8s.io/e2e-framework/third_party/helm" + "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + "sigs.k8s.io/yaml" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -32,6 +39,8 @@ import ( secretsv1alpha1 "github.com/crossplane/crossplane/apis/secrets/v1alpha1" ) +type kindConfigContextKey string + // HelmRepo manages a Helm repo. func HelmRepo(o ...helm.Option) env.Func { return func(ctx context.Context, c *envconf.Config) (context.Context, error) { @@ -102,3 +111,104 @@ func EnvFuncs(fns ...env.Func) env.Func { return ctx, nil } } + +// CreateKindClusterWithConfig create kind cluster of the given name according to +// configuration referred via configFilePath. 
+// The configuration is placed in test context afterward +func CreateKindClusterWithConfig(clusterName, configFilePath string) env.Func { + return EnvFuncs( + envfuncs.CreateKindClusterWithConfig(clusterName, "\"\"", configFilePath), + func(ctx context.Context, config *envconf.Config) (context.Context, error) { + b, err := os.ReadFile(filepath.Clean(configFilePath)) + if err != nil { + return ctx, err + } + cfg := &v1alpha4.Cluster{} + err = yaml.Unmarshal(b, cfg) + if err != nil { + return ctx, err + } + return context.WithValue(ctx, kindConfigContextKey(clusterName), cfg), nil + }, + ) +} + +// ServiceIngressEndPoint returns endpoint (addr:port) that can be used for accessing +// the service in the cluster with the given name. +func ServiceIngressEndPoint(ctx context.Context, cfg *envconf.Config, clusterName, namespace, serviceName string) (string, error) { + _, found := envfuncs.GetKindClusterFromContext(ctx, clusterName) + client := cfg.Client() + service := &corev1.Service{} + err := client.Resources().Get(ctx, serviceName, namespace, service) + if err != nil { + return "", errors.Errorf("cannot get service %s/%s at cluster %s: %w", namespace, serviceName, clusterName, err) + } + + var nodePort int32 + for _, p := range service.Spec.Ports { + if p.NodePort != 0 { + nodePort = p.NodePort + break + } + } + if nodePort == 0 { + return "", errors.Errorf("No nodePort found for service %s/%s at cluster %s", namespace, serviceName, clusterName) + } + if found { + kindCfg, err := kindConfig(ctx, clusterName) + if err != nil { + return "", errors.Errorf("cannot get kind config for cluster %s: %w", clusterName, err) + } + hostPort, err := findHostPort(kindCfg, nodePort) + if err != nil { + return "", errors.Errorf("cannot find hostPort for nodePort %d in kind config for cluster %s: %w", nodePort, clusterName, err) + } + return fmt.Sprintf("localhost:%v", hostPort), nil + } + nodes := &corev1.NodeList{} + if err := client.Resources().List(ctx, nodes); err != nil { + return "", errors.Errorf("cannot list nodes for cluster %s: %w", clusterName, err) + } + addr, err := findAnyNodeIPAddress(nodes) + if err != nil { + return "", errors.Errorf("cannot find any node IP address for cluster %s: %w", clusterName, err) + } + return fmt.Sprintf("%s:%v", addr, nodePort), nil +} + +func kindConfig(ctx context.Context, clusterName string) (*v1alpha4.Cluster, error) { + v := ctx.Value(kindConfigContextKey(clusterName)) + if v == nil { + return nil, errors.Errorf("No kind config found in context for cluster %s", clusterName) + } + kindCfg, ok := v.(*v1alpha4.Cluster) + if !ok { + return nil, errors.Errorf("kind config is not of type v1alpha4.Cluster for clustername %s", clusterName) + } + return kindCfg, nil +} + +func findAnyNodeIPAddress(nodes *corev1.NodeList) (string, error) { + if len(nodes.Items) == 0 { + return "", errors.New("no nodes in the cluster") + } + for _, a := range nodes.Items[0].Status.Addresses { + if a.Type == corev1.NodeInternalIP { + return a.Address, nil + } + } + return "", errors.Errorf("no ip address found for nodes: %v", nodes) +} + +func findHostPort(kindCfg *v1alpha4.Cluster, containerPort int32) (int32, error) { + for _, n := range kindCfg.Nodes { + if n.Role == v1alpha4.ControlPlaneRole { + for _, pm := range n.ExtraPortMappings { + if pm.ContainerPort == containerPort { + return pm.HostPort, nil + } + } + } + } + return 0, errors.Errorf("No host port found in kind config for container port: %v", containerPort) +} diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go 
index 759b174bf..4e78f2d4e 100644 --- a/test/e2e/funcs/feature.go +++ b/test/e2e/funcs/feature.go @@ -22,16 +22,22 @@ import ( "io/fs" "os" "path/filepath" + "reflect" "strings" + "sync/atomic" "testing" "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-containerregistry/pkg/crane" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/daemon" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/json" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/e2e-framework/klient/decoder" @@ -45,6 +51,8 @@ import ( xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/fieldpath" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/claim" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" ) // AllOf runs the supplied functions in order. @@ -384,6 +392,111 @@ func DeleteResources(dir, pattern string) features.Func { } } +// CopyImageToRegistry tries to copy the supplied image to the supplied registry within the supplied timeout. +func CopyImageToRegistry(clusterName, ns, sName, image string, timeout time.Duration) features.Func { + return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + reg, err := ServiceIngressEndPoint(ctx, c, clusterName, ns, sName) + if err != nil { + t.Fatal(err) + } + + t.Logf("registry endpoint %s", reg) + srcRef, err := name.ParseReference(image) + if err != nil { + t.Fatal(err) + } + + src, err := daemon.Image(srcRef) + if err != nil { + t.Fatal(err) + } + + i := strings.Split(srcRef.String(), "/") + err = wait.For(func() (done bool, err error) { + err = crane.Push(src, fmt.Sprintf("%s/%s", reg, i[1]), crane.Insecure) + if err != nil { + return false, nil //nolint:nilerr // we want to keep retrying until the timeout, then report the error + } + return true, nil + }, wait.WithTimeout(timeout)) + if err != nil { + t.Fatalf("copying image `%s` to registry `%s` not successful: %v", image, reg, err) + } + + return ctx + } +} + +// ManagedResourcesOfClaimHaveFieldValueWithin fails a test if the managed resources +// created by the claim do not have the supplied value at the supplied path +// within the supplied duration.
+func ManagedResourcesOfClaimHaveFieldValueWithin(d time.Duration, dir, file, path string, want any, filter func(object k8s.Object) bool) features.Func { + return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + cm := &claim.Unstructured{} + if err := decoder.DecodeFile(os.DirFS(dir), file, cm); err != nil { + t.Error(err) + return ctx + } + + if err := c.Client().Resources().Get(ctx, cm.GetName(), cm.GetNamespace(), cm); err != nil { + t.Errorf("cannot get claim %s: %v", cm.GetName(), err) + return ctx + } + + xrRef := cm.GetResourceReference() + uxr := &composite.Unstructured{} + + uxr.SetGroupVersionKind(xrRef.GroupVersionKind()) + if err := c.Client().Resources().Get(ctx, xrRef.Name, xrRef.Namespace, uxr); err != nil { + t.Errorf("cannot get composite %s: %v", xrRef.Name, err) + return ctx + } + + mrRefs := uxr.GetResourceReferences() + + list := &unstructured.UnstructuredList{} + for _, ref := range mrRefs { + mr := &unstructured.Unstructured{} + mr.SetName(ref.Name) + mr.SetNamespace(ref.Namespace) + mr.SetGroupVersionKind(ref.GroupVersionKind()) + list.Items = append(list.Items, *mr) + } + + count := atomic.Int32{} + match := func(o k8s.Object) bool { + // filter function should return true if the object needs to be checked. e.g., if you want to check the field + // path of a VPC object, filter function should return true for VPC objects only. + if filter != nil && !filter(o) { + t.Logf("skipping resource %s/%s/%s due to filtering", o.GetNamespace(), o.GetName(), o.GetObjectKind().GroupVersionKind().String()) + return true + } + count.Add(1) + u := asUnstructured(o) + got, err := fieldpath.Pave(u.Object).GetValue(path) + if err != nil { + return false + } + + return cmp.Equal(want, got) + } + + if err := wait.For(conditions.New(c.Client().Resources()).ResourcesMatch(list, match), wait.WithTimeout(d)); err != nil { + y, _ := yaml.Marshal(list.Items) + t.Errorf("resources did not have desired conditions: %s: %v:\n\n%s\n\n", want, err, y) + return ctx + } + + if count.Load() == 0 { + t.Errorf("there are no unfiltered referred managed resources to check") + return ctx + } + + t.Logf("%d resources have desired value %q at field path %s", len(list.Items), want, path) + return ctx + } +} + // asUnstructured turns an arbitrary runtime.Object into an *Unstructured. If // it's already a concrete *Unstructured it just returns it, otherwise it // round-trips it through JSON encoding. This is necessary because types that @@ -404,8 +517,29 @@ func asUnstructured(o runtime.Object) *unstructured.Unstructured { // namespace. func identifier(o k8s.Object) string { k := o.GetObjectKind().GroupVersionKind().Kind + if k == "" { + t := reflect.TypeOf(o) + if t != nil { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + k = t.Name() + } else { + k = fmt.Sprintf("%T", o) + } + } if o.GetNamespace() == "" { return fmt.Sprintf("%s %s", k, o.GetName()) } return fmt.Sprintf("%s %s/%s", k, o.GetNamespace(), o.GetName()) } + +// FilterByGK returns a filter function that returns true if the supplied object is of the supplied GroupKind. 
+func FilterByGK(gk schema.GroupKind) func(o k8s.Object) bool { + return func(o k8s.Object) bool { + if o.GetObjectKind() == nil { + return false + } + return o.GetObjectKind().GroupVersionKind().Group == gk.Group && o.GetObjectKind().GroupVersionKind().Kind == gk.Kind + } +} diff --git a/test/e2e/install_test.go b/test/e2e/install_test.go index fb00ff653..648d40b5d 100644 --- a/test/e2e/install_test.go +++ b/test/e2e/install_test.go @@ -36,108 +36,72 @@ import ( // Crossplane's lifecycle (installing, upgrading, etc). const LabelAreaLifecycle = "lifecycle" -// TestCrossplane tests installing, uninstalling, and upgrading Crossplane. -func TestCrossplane(t *testing.T) { - // We install Crossplane as part of setting up the test environment, so - // we're really only validating the installation here. - install := features.Table{ - { - Name: "CoreDeploymentBecomesAvailable", - Assessment: funcs.DeploymentBecomesAvailableWithin(1*time.Minute, namespace, "crossplane"), - }, - { - Name: "RBACManagerDeploymentBecomesAvailable", - Assessment: funcs.DeploymentBecomesAvailableWithin(1*time.Minute, namespace, "crossplane-rbac-manager"), - }, - { - Name: "CoreCRDsBecomeEstablished", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, crdsDir, "*.yaml", funcs.CRDInitialNamesAccepted()), - }, - } - - // Test that it's possible to cleanly uninstall Crossplane, even after - // having created and deleted a claim. - manifests := "test/e2e/manifests/lifecycle/uninstall" - uninstall := features.Table{ - { - Name: "ClaimPrerequisitesAreCreated", - Assessment: funcs.AllOf( - funcs.ApplyResources(FieldManager, manifests, "prerequisites/*.yaml"), - funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/*.yaml"), - ), - }, - { - Name: "XRDBecomesEstablished", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/definition.yaml", apiextensionsv1.WatchingComposite()), - }, - { - Name: "ClaimIsCreated", - Assessment: funcs.AllOf( +// TestCrossplaneLifecycle tests two features expecting them to be run in order: +// - CrossplaneUninstall: Test that it's possible to cleanly uninstall Crossplane, even +// after having created and deleted a claim. +// - CrossplaneUpgrade: Test that it's possible to upgrade Crossplane from the most recent +// stable Helm chart to the one we're testing, even when a claim exists. This +// expects Crossplane not to be installed. +// +// Note: First time Installation is tested as part of the environment setup, +// if not disabled explicitly. +func TestCrossplaneLifecycle(t *testing.T) { + manifests := "test/e2e/manifests/lifecycle/upgrade" + environment.Test(t, + // Test that it's possible to cleanly uninstall Crossplane, even after + // having created and deleted a claim. + features.New("CrossplaneUninstall"). + WithLabel(LabelArea, LabelAreaLifecycle). + WithLabel(LabelSize, LabelSizeSmall). + WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). + WithSetup("CreatePrerequisites", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "setup/*.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "setup/*.yaml"), + )). + WithSetup("XRDAreEstablished", funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "setup/definition.yaml", apiextensionsv1.WatchingComposite())). 
+ WithSetup("CreateClaim", funcs.AllOf( funcs.ApplyResources(FieldManager, manifests, "claim.yaml"), funcs.ResourcesCreatedWithin(30*time.Second, manifests, "claim.yaml"), - ), - }, - { - Name: "ClaimBecomesAvailable", - Assessment: funcs.ResourcesHaveConditionWithin(2*time.Minute, manifests, "claim.yaml", xpv1.Available()), - }, - { - Name: "ClaimIsDeleted", - Assessment: funcs.AllOf( + )). + WithSetup("ClaimIsAvailable", funcs.ResourcesHaveConditionWithin(2*time.Minute, manifests, "claim.yaml", xpv1.Available())). + Assess("DeleteClaim", funcs.AllOf( funcs.DeleteResources(manifests, "claim.yaml"), funcs.ResourcesDeletedWithin(2*time.Minute, manifests, "claim.yaml"), - ), - }, - { - Name: "PrerequisitesAreDeleted", - Assessment: funcs.AllOf( - funcs.DeleteResources(manifests, "prerequisites/*.yaml"), - funcs.ResourcesDeletedWithin(3*time.Minute, manifests, "prerequisites/*.yaml"), - ), - }, - { - Name: "CrossplaneIsUninstalled", - Assessment: funcs.AsFeaturesFunc(funcs.HelmUninstall( - helm.WithName(helmReleaseName), - helm.WithNamespace(namespace), - )), - }, - // Uninstalling the Crossplane Helm chart doesn't remove its CRDs. We - // want to make sure they can be deleted cleanly. If they can't, it's a - // sign something they define might have stuck around. - { - Name: "CoreCRDsAreDeleted", - Assessment: funcs.AllOf( + )). + Assess("DeletePrerequisites", funcs.AllOf( + funcs.DeleteResources(manifests, "setup/*.yaml"), + funcs.ResourcesDeletedWithin(2*time.Minute, manifests, "setup/*.yaml"), + )). + Assess("UninstallCrossplane", funcs.AllOf( + funcs.AsFeaturesFunc(funcs.HelmUninstall( + helm.WithName(helmReleaseName), + helm.WithNamespace(namespace), + )), + )). + // Uninstalling the Crossplane Helm chart doesn't remove its CRDs. We + // want to make sure they can be deleted cleanly. If they can't, it's a + // sign something they define might have stuck around. + WithTeardown("DeleteCrossplaneCRDs", funcs.AllOf( funcs.DeleteResources(crdsDir, "*.yaml"), funcs.ResourcesDeletedWithin(3*time.Minute, crdsDir, "*.yaml"), - ), - }, - // Uninstalling the Crossplane Helm chart doesn't remove the namespace - // it was installed to either. We want to make sure it can be deleted - // cleanly. - { - Name: "CrossplaneNamespaceIsDeleted", - Assessment: funcs.AllOf( + )). + // Uninstalling the Crossplane Helm chart doesn't remove the namespace + // it was installed to either. We want to make sure it can be deleted + // cleanly. + WithTeardown("DeleteCrossplaneNamespace", funcs.AllOf( funcs.AsFeaturesFunc(envfuncs.DeleteNamespace(namespace)), funcs.ResourceDeletedWithin(3*time.Minute, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}), - ), - }, - } - - // Test that it's possible to upgrade from the most recent stable Crossplane - // Helm chart to the one we're testing, even when a claim exists. - manifests = "test/e2e/manifests/lifecycle/upgrade" - upgrade := features.Table{ - { - Name: "CrossplaneNamespaceIsCreated", - Assessment: funcs.AllOf( - funcs.AsFeaturesFunc(envfuncs.CreateNamespace(namespace)), - funcs.ResourceCreatedWithin(1*time.Minute, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}), - ), - }, - { - Name: "CrossplaneStableIsInstalled", - Assessment: funcs.AllOf( + )). + Feature(), + features.New("CrossplaneUpgrade"). + WithLabel(LabelArea, LabelAreaLifecycle). + WithLabel(LabelSize, LabelSizeSmall). 
+ // We expect Crossplane to have been uninstalled first + Assess("CrossplaneIsNotInstalled", funcs.AllOf( + funcs.ResourceDeletedWithin(1*time.Minute, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}), + funcs.ResourcesDeletedWithin(3*time.Minute, crdsDir, "*.yaml"), + )). + Assess("InstallStableCrossplane", funcs.AllOf( funcs.AsFeaturesFunc(funcs.HelmRepo( helm.WithArgs("add"), helm.WithArgs("crossplane-stable"), @@ -147,83 +111,35 @@ func TestCrossplane(t *testing.T) { helm.WithNamespace(namespace), helm.WithName(helmReleaseName), helm.WithChart("crossplane-stable/crossplane"), + helm.WithArgs("--create-namespace"), )), - ), - }, - { - Name: "CrossplaneStableIsRunning", - Assessment: funcs.ReadyToTestWithin(1*time.Minute, namespace), - }, - { - Name: "ClaimPrerequisitesAreCreated", - Assessment: funcs.AllOf( - funcs.ApplyResources(FieldManager, manifests, "prerequisites/*.yaml"), - funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/*.yaml"), - ), - }, - { - Name: "XRDBecomesEstablished", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/definition.yaml", apiextensionsv1.WatchingComposite()), - }, - { - Name: "ClaimIsCreated", - Assessment: funcs.AllOf( + funcs.ReadyToTestWithin(1*time.Minute, namespace))). + Assess("CreateClaimPrerequisites", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "setup/*.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "setup/*.yaml"), + )). + Assess("XRDIsEstablished", funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "setup/definition.yaml", apiextensionsv1.WatchingComposite())). + Assess("CreateClaim", funcs.AllOf( funcs.ApplyResources(FieldManager, manifests, "claim.yaml"), funcs.ResourcesCreatedWithin(30*time.Second, manifests, "claim.yaml"), - ), - }, - { - Name: "ClaimBecomesAvailable", - Assessment: funcs.ResourcesHaveConditionWithin(2*time.Minute, manifests, "claim.yaml", xpv1.Available()), - }, - { - Name: "CrossplaneIsUpgraded", - Assessment: funcs.AsFeaturesFunc(funcs.HelmUpgrade(HelmOptions()...)), - }, - { - Name: "CoreDeploymentBecomesAvailable", - Assessment: funcs.DeploymentBecomesAvailableWithin(1*time.Minute, namespace, "crossplane"), - }, - { - Name: "RBACManagerDeploymentBecomesAvailable", - Assessment: funcs.DeploymentBecomesAvailableWithin(1*time.Minute, namespace, "crossplane-rbac-manager"), - }, - { - Name: "CoreCRDsBecomeEstablished", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, crdsDir, "*.yaml", funcs.CRDInitialNamesAccepted()), - }, - { - Name: "ClaimStillAvailable", - Assessment: funcs.ResourcesHaveConditionWithin(2*time.Minute, manifests, "claim.yaml", xpv1.Available()), - }, - { - Name: "ClaimIsDeleted", - Assessment: funcs.AllOf( + )). + Assess("ClaimIsAvailable", funcs.ResourcesHaveConditionWithin(2*time.Minute, manifests, "claim.yaml", xpv1.Available())). + Assess("UpgradeCrossplane", funcs.AllOf( + funcs.AsFeaturesFunc(funcs.HelmUpgrade(HelmOptions()...)), + funcs.ReadyToTestWithin(1*time.Minute, namespace), + )). + Assess("CoreDeploymentIsAvailable", funcs.DeploymentBecomesAvailableWithin(1*time.Minute, namespace, "crossplane")). + Assess("RBACManagerDeploymentIsAvailable", funcs.DeploymentBecomesAvailableWithin(1*time.Minute, namespace, "crossplane-rbac-manager")). + Assess("CoreCRDsAreEstablished", funcs.ResourcesHaveConditionWithin(1*time.Minute, crdsDir, "*.yaml", funcs.CRDInitialNamesAccepted())). 
+ Assess("ClaimIsStillAvailable", funcs.ResourcesHaveConditionWithin(2*time.Minute, manifests, "claim.yaml", xpv1.Available())). + Assess("DeleteClaim", funcs.AllOf( funcs.DeleteResources(manifests, "claim.yaml"), funcs.ResourcesDeletedWithin(2*time.Minute, manifests, "claim.yaml"), - ), - }, - { - Name: "ClaimPrerequisitesAreDeleted", - Assessment: funcs.AllOf( - funcs.DeleteResources(manifests, "prerequisites/*.yaml"), - funcs.ResourcesDeletedWithin(3*time.Minute, manifests, "prerequisites/*.yaml"), - ), - }, - } - - environment.Test(t, - install.Build("Install"). - WithLabel(LabelArea, LabelAreaLifecycle). - WithLabel(LabelSize, LabelSizeSmall). - Feature(), - uninstall.Build("Uninstall"). - WithLabel(LabelArea, LabelAreaLifecycle). - WithLabel(LabelSize, LabelSizeLarge). - Feature(), - upgrade.Build("Upgrade"). - WithLabel(LabelArea, LabelAreaLifecycle). - WithLabel(LabelSize, LabelSizeLarge). + )). + WithTeardown("DeletePrerequisites", funcs.AllOf( + funcs.DeleteResources(manifests, "setup/*.yaml"), + funcs.ResourcesDeletedWithin(3*time.Minute, manifests, "setup/*.yaml"), + )). Feature(), ) } diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go index 8c3813ead..a9464975f 100644 --- a/test/e2e/main_test.go +++ b/test/e2e/main_test.go @@ -19,11 +19,13 @@ package e2e import ( "flag" "os" + "path/filepath" "strings" "testing" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/e2e-framework/klient/conf" "sigs.k8s.io/e2e-framework/pkg/env" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/envfuncs" @@ -37,6 +39,15 @@ import ( // Features within an area may be split across different test functions. const LabelArea = "area" +// LabelModifyCrossplaneInstallation is used to mark tests that are going to +// modify Crossplane's installation, e.g. installing, uninstalling or upgrading +// it. +const LabelModifyCrossplaneInstallation = "modify-crossplane-installation" + +// LabelModifyCrossplaneInstallationTrue is used to mark tests that are going to +// modify Crossplane's installation. +const LabelModifyCrossplaneInstallationTrue = "true" + // LabelStage represents the 'stage' of a feature - alpha, beta, etc. Generally // available features have no stage label. const LabelStage = "stage" @@ -89,6 +100,9 @@ func HelmOptions(extra ...helm.Option) []helm.Option { helm.WithName(helmReleaseName), helm.WithNamespace(namespace), helm.WithChart(helmChartDir), + // wait for the deployment to be ready for up to 5 minutes before returning + helm.WithWait(), + helm.WithTimeout("5m"), helm.WithArgs( // Run with debug logging to ensure all log statements are run. "--set args={--debug}", @@ -103,8 +117,11 @@ func HelmOptions(extra ...helm.Option) []helm.Option { return append(o, extra...) } -// The test environment, shared by all E2E test functions. -var environment env.Environment +var ( + // The test environment, shared by all E2E test functions. + environment env.Environment + clusterName string +) func TestMain(m *testing.M) { // TODO(negz): Global loggers are dumb and klog is dumb. 
Remove this when @@ -112,29 +129,57 @@ func TestMain(m *testing.M) { // https://github.com/kubernetes-sigs/e2e-framework/issues/270 log.SetLogger(klog.NewKlogr()) - create := flag.Bool("create-kind-cluster", true, "create a kind cluster (and deploy Crossplane) before running tests") + kindClusterName := flag.String("kind-cluster-name", "", "name of the kind cluster to use") + create := flag.Bool("create-kind-cluster", true, "create a kind cluster (and deploy Crossplane) before running tests, if the cluster does not already exist with the same name") destroy := flag.Bool("destroy-kind-cluster", true, "destroy the kind cluster when tests complete") - - clusterName := envconf.RandomName("crossplane-e2e", 32) - environment, _ = env.NewFromFlags() + install := flag.Bool("install-crossplane", true, "install Crossplane before running tests") + load := flag.Bool("load-images-kind-cluster", true, "load Crossplane images into the kind cluster before running tests") var setup []env.Func var finish []env.Func - if *create { + cfg, _ := envconf.NewFromFlags() + + clusterName = envconf.RandomName("crossplane-e2e", 32) + if *kindClusterName != "" { + clusterName = *kindClusterName + } + + // We want a kind cluster whenever we're asked to create one, or given the name of an existing one to reuse. + isKindCluster := *create || *kindClusterName != "" + if isKindCluster { + kindCfg, err := filepath.Abs(filepath.Join("test", "e2e", "testdata", "kindConfig.yaml")) + if err != nil { + log.Log.Error(err, "error getting kind config file") + os.Exit(1) + } setup = []env.Func{ - envfuncs.CreateKindCluster(clusterName), + funcs.CreateKindClusterWithConfig(clusterName, kindCfg), + } + } else { + cfg.WithKubeconfigFile(conf.ResolveKubeConfigFile()) + } + environment = env.NewWithConfig(cfg) + + if *load && isKindCluster { + setup = append(setup, envfuncs.LoadDockerImageToCluster(clusterName, imgcore), envfuncs.LoadDockerImageToCluster(clusterName, imgxfn), + ) + } + if *install { + setup = append(setup, envfuncs.CreateNamespace(namespace), funcs.HelmInstall(HelmOptions()...), - } + ) } // We always want to add our types to the scheme. setup = append(setup, funcs.AddCrossplaneTypesToScheme()) - if *destroy { + // We only destroy the cluster when asked to, and only when it's a kind cluster managed by this suite; + // when running against an existing cluster via a kubeconfig there is nothing for us to destroy.
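+ // Putting the flags together: -kind-cluster-name names a kind cluster to reuse across runs (it is still created first if it does not already exist), + // while -create-kind-cluster=false without a name runs the suite against whatever cluster the resolved kubeconfig points at; + // in that case -install-crossplane=false also skips the Helm install when Crossplane is already present.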
+ if *destroy && isKindCluster { finish = []env.Func{envfuncs.DestroyKindCluster(clusterName)} } diff --git a/test/e2e/manifests/apiextensions/composition/minimal/prerequisites/composition.yaml b/test/e2e/manifests/apiextensions/composition/minimal/setup/composition.yaml similarity index 100% rename from test/e2e/manifests/apiextensions/composition/minimal/prerequisites/composition.yaml rename to test/e2e/manifests/apiextensions/composition/minimal/setup/composition.yaml diff --git a/test/e2e/manifests/apiextensions/composition/minimal/prerequisites/definition.yaml b/test/e2e/manifests/apiextensions/composition/minimal/setup/definition.yaml similarity index 100% rename from test/e2e/manifests/apiextensions/composition/minimal/prerequisites/definition.yaml rename to test/e2e/manifests/apiextensions/composition/minimal/setup/definition.yaml diff --git a/test/e2e/manifests/apiextensions/composition/minimal/prerequisites/provider.yaml b/test/e2e/manifests/apiextensions/composition/minimal/setup/provider.yaml similarity index 100% rename from test/e2e/manifests/apiextensions/composition/minimal/prerequisites/provider.yaml rename to test/e2e/manifests/apiextensions/composition/minimal/setup/provider.yaml diff --git a/test/e2e/manifests/apiextensions/composition/patch-and-transform/prerequisites/composition.yaml b/test/e2e/manifests/apiextensions/composition/patch-and-transform/setup/composition.yaml similarity index 100% rename from test/e2e/manifests/apiextensions/composition/patch-and-transform/prerequisites/composition.yaml rename to test/e2e/manifests/apiextensions/composition/patch-and-transform/setup/composition.yaml diff --git a/test/e2e/manifests/apiextensions/composition/patch-and-transform/prerequisites/definition.yaml b/test/e2e/manifests/apiextensions/composition/patch-and-transform/setup/definition.yaml similarity index 100% rename from test/e2e/manifests/apiextensions/composition/patch-and-transform/prerequisites/definition.yaml rename to test/e2e/manifests/apiextensions/composition/patch-and-transform/setup/definition.yaml diff --git a/test/e2e/manifests/apiextensions/composition/patch-and-transform/prerequisites/provider.yaml b/test/e2e/manifests/apiextensions/composition/patch-and-transform/setup/provider.yaml similarity index 100% rename from test/e2e/manifests/apiextensions/composition/patch-and-transform/prerequisites/provider.yaml rename to test/e2e/manifests/apiextensions/composition/patch-and-transform/setup/provider.yaml diff --git a/test/e2e/manifests/apiextensions/validation/composition-schema-invalid/composition.yaml b/test/e2e/manifests/apiextensions/composition/validation/composition-invalid.yaml similarity index 100% rename from test/e2e/manifests/apiextensions/validation/composition-schema-invalid/composition.yaml rename to test/e2e/manifests/apiextensions/composition/validation/composition-invalid.yaml diff --git a/test/e2e/manifests/apiextensions/validation/composition-schema-valid/composition.yaml b/test/e2e/manifests/apiextensions/composition/validation/composition-valid.yaml similarity index 100% rename from test/e2e/manifests/apiextensions/validation/composition-schema-valid/composition.yaml rename to test/e2e/manifests/apiextensions/composition/validation/composition-valid.yaml diff --git a/test/e2e/manifests/apiextensions/validation/composition-schema-invalid/prerequisites/definition.yaml b/test/e2e/manifests/apiextensions/composition/validation/setup/definition.yaml similarity index 100% rename from 
test/e2e/manifests/apiextensions/validation/composition-schema-invalid/prerequisites/definition.yaml rename to test/e2e/manifests/apiextensions/composition/validation/setup/definition.yaml diff --git a/test/e2e/manifests/apiextensions/validation/composition-schema-invalid/prerequisites/provider.yaml b/test/e2e/manifests/apiextensions/composition/validation/setup/provider.yaml similarity index 100% rename from test/e2e/manifests/apiextensions/validation/composition-schema-invalid/prerequisites/provider.yaml rename to test/e2e/manifests/apiextensions/composition/validation/setup/provider.yaml diff --git a/test/e2e/manifests/lifecycle/uninstall/claim.yaml b/test/e2e/manifests/lifecycle/uninstall/claim.yaml deleted file mode 100644 index be6360adf..000000000 --- a/test/e2e/manifests/lifecycle/uninstall/claim.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: nop.example.org/v1alpha1 -kind: NopResource -metadata: - namespace: default - name: lifecycle-uninstall -spec: - coolField: "I'm cool!" - # This is necessary to ensure the claim's MRs are actually gone before we - # delete the Provider - https://github.com/crossplane/crossplane/issues/4251 - compositeDeletePolicy: Foreground diff --git a/test/e2e/manifests/lifecycle/upgrade/prerequisites/composition.yaml b/test/e2e/manifests/lifecycle/upgrade/prerequisites/composition.yaml deleted file mode 100644 index 850f430e6..000000000 --- a/test/e2e/manifests/lifecycle/upgrade/prerequisites/composition.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: apiextensions.crossplane.io/v1 -kind: Composition -metadata: - name: xnopresources.nop.example.org -spec: - compositeTypeRef: - apiVersion: nop.example.org/v1alpha1 - kind: XNopResource - resources: - - name: nop-resource-1 - base: - apiVersion: nop.crossplane.io/v1alpha1 - kind: NopResource - spec: - forProvider: - conditionAfter: - - conditionType: Ready - conditionStatus: "False" - time: 0s - - conditionType: Ready - conditionStatus: "True" - time: 10s diff --git a/test/e2e/manifests/lifecycle/uninstall/prerequisites/composition.yaml b/test/e2e/manifests/lifecycle/upgrade/setup/composition.yaml similarity index 100% rename from test/e2e/manifests/lifecycle/uninstall/prerequisites/composition.yaml rename to test/e2e/manifests/lifecycle/upgrade/setup/composition.yaml diff --git a/test/e2e/manifests/lifecycle/uninstall/prerequisites/definition.yaml b/test/e2e/manifests/lifecycle/upgrade/setup/definition.yaml similarity index 100% rename from test/e2e/manifests/lifecycle/uninstall/prerequisites/definition.yaml rename to test/e2e/manifests/lifecycle/upgrade/setup/definition.yaml diff --git a/test/e2e/manifests/apiextensions/validation/composition-schema-valid/prerequisites/provider.yaml b/test/e2e/manifests/lifecycle/upgrade/setup/provider.yaml similarity index 100% rename from test/e2e/manifests/apiextensions/validation/composition-schema-valid/prerequisites/provider.yaml rename to test/e2e/manifests/lifecycle/upgrade/setup/provider.yaml diff --git a/test/e2e/manifests/xfnrunner/private-registry/pull/claim.yaml b/test/e2e/manifests/xfnrunner/private-registry/pull/claim.yaml new file mode 100644 index 000000000..c4599aa85 --- /dev/null +++ b/test/e2e/manifests/xfnrunner/private-registry/pull/claim.yaml @@ -0,0 +1,9 @@ +apiVersion: nop.example.org/v1alpha1 +kind: NopResource +metadata: + name: fn-labelizer + namespace: default +spec: + coolField: example + compositionRef: + name: fn.xnopresources.nop.example.org \ No newline at end of file diff --git 
a/test/e2e/manifests/xfnrunner/private-registry/pull/composition.yaml b/test/e2e/manifests/xfnrunner/private-registry/pull/composition.yaml new file mode 100644 index 000000000..227484321 --- /dev/null +++ b/test/e2e/manifests/xfnrunner/private-registry/pull/composition.yaml @@ -0,0 +1,39 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: fn.xnopresources.nop.example.org + labels: + provider: provider-nop +spec: + compositeTypeRef: + apiVersion: nop.example.org/v1alpha1 + kind: XNopResource + resources: + - name: nopinstance1 + base: + apiVersion: nop.crossplane.io/v1alpha1 + kind: NopResource + spec: + forProvider: + conditionAfter: + - conditionType: Ready + conditionStatus: "False" + time: 0s + - conditionType: Ready + conditionStatus: "True" + time: 10s + - conditionType: Synced + conditionStatus: "False" + time: 0s + - conditionType: Synced + conditionStatus: "True" + time: 10s + writeConnectionSecretsToRef: + namespace: crossplane-system + name: nop-example-resource + functions: + - name: labelizer + type: Container + container: + image: private-docker-registry.xfn-registry.svc.cluster.local:5000/fn-labelizer:latest + imagePullPolicy: Always \ No newline at end of file diff --git a/test/e2e/manifests/lifecycle/upgrade/prerequisites/definition.yaml b/test/e2e/manifests/xfnrunner/private-registry/pull/prerequisites/definition.yaml similarity index 100% rename from test/e2e/manifests/lifecycle/upgrade/prerequisites/definition.yaml rename to test/e2e/manifests/xfnrunner/private-registry/pull/prerequisites/definition.yaml diff --git a/test/e2e/manifests/lifecycle/uninstall/prerequisites/provider.yaml b/test/e2e/manifests/xfnrunner/private-registry/pull/prerequisites/provider.yaml similarity index 100% rename from test/e2e/manifests/lifecycle/uninstall/prerequisites/provider.yaml rename to test/e2e/manifests/xfnrunner/private-registry/pull/prerequisites/provider.yaml diff --git a/test/e2e/manifests/xfnrunner/tmp-writer/claim.yaml b/test/e2e/manifests/xfnrunner/tmp-writer/claim.yaml new file mode 100644 index 000000000..d2e90fd0b --- /dev/null +++ b/test/e2e/manifests/xfnrunner/tmp-writer/claim.yaml @@ -0,0 +1,9 @@ +apiVersion: nop.example.org/v1alpha1 +kind: NopResource +metadata: + name: fn-tmp-writer + namespace: default +spec: + coolField: example + compositionRef: + name: fn.xnopresources.nop.example.org \ No newline at end of file diff --git a/test/e2e/manifests/xfnrunner/tmp-writer/composition.yaml b/test/e2e/manifests/xfnrunner/tmp-writer/composition.yaml new file mode 100644 index 000000000..a283bd74f --- /dev/null +++ b/test/e2e/manifests/xfnrunner/tmp-writer/composition.yaml @@ -0,0 +1,39 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: fn.xnopresources.nop.example.org + labels: + provider: provider-nop +spec: + compositeTypeRef: + apiVersion: nop.example.org/v1alpha1 + kind: XNopResource + resources: + - name: nopinstance1 + base: + apiVersion: nop.crossplane.io/v1alpha1 + kind: NopResource + spec: + forProvider: + conditionAfter: + - conditionType: Ready + conditionStatus: "False" + time: 0s + - conditionType: Ready + conditionStatus: "True" + time: 10s + - conditionType: Synced + conditionStatus: "False" + time: 0s + - conditionType: Synced + conditionStatus: "True" + time: 10s + writeConnectionSecretsToRef: + namespace: crossplane-system + name: nop-example-resource + functions: + - name: tmp-writer + type: Container + container: + image: 
public-docker-registry.xfn-registry.svc.cluster.local:5000/fn-tmp-writer:latest + imagePullPolicy: Always \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/validation/composition-schema-valid/prerequisites/definition.yaml b/test/e2e/manifests/xfnrunner/tmp-writer/prerequisites/definition.yaml similarity index 81% rename from test/e2e/manifests/apiextensions/validation/composition-schema-valid/prerequisites/definition.yaml rename to test/e2e/manifests/xfnrunner/tmp-writer/prerequisites/definition.yaml index afe55de66..bf70cb798 100644 --- a/test/e2e/manifests/apiextensions/validation/composition-schema-valid/prerequisites/definition.yaml +++ b/test/e2e/manifests/xfnrunner/tmp-writer/prerequisites/definition.yaml @@ -26,9 +26,4 @@ spec: coolField: type: string required: - - coolField - status: - type: object - properties: - coolerField: - type: string \ No newline at end of file + - coolField \ No newline at end of file diff --git a/test/e2e/manifests/lifecycle/upgrade/prerequisites/provider.yaml b/test/e2e/manifests/xfnrunner/tmp-writer/prerequisites/provider.yaml similarity index 100% rename from test/e2e/manifests/lifecycle/upgrade/prerequisites/provider.yaml rename to test/e2e/manifests/xfnrunner/tmp-writer/prerequisites/provider.yaml diff --git a/test/e2e/pkg_test.go b/test/e2e/pkg_test.go index 4b7693b2e..4c26269bb 100644 --- a/test/e2e/pkg_test.go +++ b/test/e2e/pkg_test.go @@ -32,140 +32,89 @@ import ( // Providers, Configurations, etc). const LabelAreaPkg = "pkg" -func TestConfiguration(t *testing.T) { - // Test that we can install a Configuration from a private repository using - // a package pull secret. +// TestConfigurationPullFromPrivateRegistry tests that a Configuration can be +// installed from a private registry using a package pull secret. +func TestConfigurationPullFromPrivateRegistry(t *testing.T) { manifests := "test/e2e/manifests/pkg/configuration/private" - private := features.Table{ - { - Name: "ConfigurationIsCreated", - Assessment: funcs.AllOf( + + environment.Test(t, + features.New("ConfigurationPullFromPrivateRegistry"). + WithLabel(LabelArea, LabelAreaPkg). + WithLabel(LabelSize, LabelSizeSmall). + WithSetup("CreateConfiguration", funcs.AllOf( funcs.ApplyResources(FieldManager, manifests, "*.yaml"), funcs.ResourcesCreatedWithin(1*time.Minute, manifests, "*.yaml"), - ), - }, - { - Name: "ConfigurationIsHealthy", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "configuration.yaml", pkgv1.Healthy(), pkgv1.Active()), - }, - { - Name: "ConfigurationIsDeleted", - Assessment: funcs.AllOf( + )). + Assess("ConfigurationIsHealthy", funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "configuration.yaml", pkgv1.Healthy(), pkgv1.Active())). + WithTeardown("DeleteConfiguration", funcs.AllOf( funcs.DeleteResources(manifests, "*.yaml"), funcs.ResourcesDeletedWithin(1*time.Minute, manifests, "*.yaml"), - ), - }, - } + )).Feature(), + ) +} + +// TestConfigurationWithDependency tests that a Configuration with a dependency +// on a Provider will become healthy when the Provider becomes healthy. +func TestConfigurationWithDependency(t *testing.T) { + manifests := "test/e2e/manifests/pkg/configuration/dependency" - manifests = "test/e2e/manifests/pkg/configuration/dependency" - dependency := features.Table{ - { - Name: "ConfigurationIsCreated", - Assessment: funcs.AllOf( + environment.Test(t, + features.New("ConfigurationWithDependency"). + WithLabel(LabelArea, LabelAreaPkg). + WithLabel(LabelSize, LabelSizeSmall). 
+ WithSetup("ApplyConfiguration", funcs.AllOf( funcs.ApplyResources(FieldManager, manifests, "configuration.yaml"), funcs.ResourcesCreatedWithin(1*time.Minute, manifests, "configuration.yaml"), - ), - }, - { - Name: "ConfigurationIsHealthy", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "configuration.yaml", pkgv1.Healthy(), pkgv1.Active()), - }, - { - Name: "ProviderIsHealthy", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "provider-dependency.yaml", pkgv1.Healthy(), pkgv1.Active()), - }, - { - Name: "ConfigurationIsDeleted", - Assessment: funcs.AllOf( + )). + Assess("ConfigurationIsHealthy", + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "configuration.yaml", pkgv1.Healthy(), pkgv1.Active())). + Assess("RequiredProviderIsHealthy", + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "provider-dependency.yaml", pkgv1.Healthy(), pkgv1.Active())). + // Dependencies are not automatically deleted. + WithTeardown("DeleteConfiguration", funcs.AllOf( funcs.DeleteResources(manifests, "configuration.yaml"), funcs.ResourcesDeletedWithin(1*time.Minute, manifests, "configuration.yaml"), - ), - }, - { - // Dependencies are not automatically deleted. - Name: "ProviderIsDeleted", - Assessment: funcs.AllOf( + )). + WithTeardown("DeleteRequiredProvider", funcs.AllOf( funcs.DeleteResources(manifests, "provider-dependency.yaml"), funcs.ResourcesDeletedWithin(1*time.Minute, manifests, "provider-dependency.yaml"), - ), - }, - } - - setup := funcs.ReadyToTestWithin(1*time.Minute, namespace) - environment.Test(t, - private.Build("PullFromPrivateRegistry"). - WithLabel(LabelArea, LabelAreaPkg). - WithLabel(LabelSize, LabelSizeSmall). - Setup(setup).Feature(), - dependency.Build("WithDependency"). - WithLabel(LabelArea, LabelAreaPkg). - WithLabel(LabelSize, LabelSizeSmall). - Setup(setup).Feature(), + )).Feature(), ) } -func TestProvider(t *testing.T) { +func TestProviderUpgrade(t *testing.T) { // Test that we can upgrade a provider to a new version, even when a managed // resource has been created. manifests := "test/e2e/manifests/pkg/provider" - upgrade := features.Table{ - { - Name: "ProviderIsInstalled", - Assessment: funcs.AllOf( + + environment.Test(t, + features.New("ProviderUpgrade"). + WithLabel(LabelArea, LabelAreaPkg). + WithLabel(LabelSize, LabelSizeSmall). + WithSetup("ApplyInitialProvider", funcs.AllOf( funcs.ApplyResources(FieldManager, manifests, "provider-initial.yaml"), funcs.ResourcesCreatedWithin(1*time.Minute, manifests, "provider-initial.yaml"), - ), - }, - { - Name: "ProviderBecomesHealthy", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "provider-initial.yaml", pkgv1.Healthy(), pkgv1.Active()), - }, - { - Name: "ManagedResourceIsCreated", - Assessment: funcs.AllOf( + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "provider-initial.yaml", pkgv1.Healthy(), pkgv1.Active()), + )). + WithSetup("InitialManagedResourceIsReady", funcs.AllOf( funcs.ApplyResources(FieldManager, manifests, "mr-initial.yaml"), funcs.ResourcesCreatedWithin(1*time.Minute, manifests, "mr-initial.yaml"), - ), - }, - { - Name: "ProviderIsUpgraded", - Assessment: funcs.ApplyResources(FieldManager, manifests, "provider-upgrade.yaml"), - }, - { - Name: "UpgradedProviderBecomesHealthy", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "provider-upgrade.yaml", pkgv1.Healthy(), pkgv1.Active()), - }, - { - Name: "ManagedResourceIsUpdated", - Assessment: funcs.AllOf( + )). 
+ Assess("UpgradeProvider", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "provider-upgrade.yaml"), + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "provider-upgrade.yaml", pkgv1.Healthy(), pkgv1.Active()), + )). + Assess("UpgradeManagedResource", funcs.AllOf( funcs.ApplyResources(FieldManager, manifests, "mr-upgrade.yaml"), - ), - }, - { - Name: "ManagedResourceBecomesAvailable", - Assessment: funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "mr.yaml", xpv1.Available()), - }, - { - Name: "ManagedResourceIsDeleted", - Assessment: funcs.AllOf( + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "mr.yaml", xpv1.Available()), + )). + WithTeardown("DeleteUpgradedManagedResource", funcs.AllOf( funcs.DeleteResources(manifests, "mr-upgrade.yaml"), funcs.ResourcesDeletedWithin(1*time.Minute, manifests, "mr-upgrade.yaml"), - ), - }, - { - Name: "ProviderIsDeleted", - Assessment: funcs.AllOf( + )). + WithTeardown("DeleteUpgradedProvider", funcs.AllOf( funcs.DeleteResources(manifests, "provider-upgrade.yaml"), funcs.ResourcesDeletedWithin(1*time.Minute, manifests, "provider-upgrade.yaml"), - ), - }, - } - - setup := funcs.ReadyToTestWithin(1*time.Minute, namespace) - environment.Test(t, - upgrade.Build("Upgrade"). - WithLabel(LabelArea, LabelAreaPkg). - WithLabel(LabelSize, LabelSizeSmall). - Setup(setup).Feature(), + )).Feature(), ) } diff --git a/test/e2e/testdata/images/labelizer/Dockerfile b/test/e2e/testdata/images/labelizer/Dockerfile new file mode 100644 index 000000000..18ca61c70 --- /dev/null +++ b/test/e2e/testdata/images/labelizer/Dockerfile @@ -0,0 +1,6 @@ +FROM mikefarah/yq:4.34.2 + +COPY labelizer.sh /bin +USER root + +ENTRYPOINT ["/bin/labelizer.sh"] \ No newline at end of file diff --git a/test/e2e/testdata/images/labelizer/labelizer.sh b/test/e2e/testdata/images/labelizer/labelizer.sh new file mode 100755 index 000000000..e0bfb2306 --- /dev/null +++ b/test/e2e/testdata/images/labelizer/labelizer.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env sh + +yq '(.desired.resources[] | .resource.metadata.labels) |= {"labelizer.xfn.crossplane.io/processed": "true"} + .' diff --git a/test/e2e/testdata/images/tmp-writer/Dockerfile b/test/e2e/testdata/images/tmp-writer/Dockerfile new file mode 100644 index 000000000..c5dc38d5e --- /dev/null +++ b/test/e2e/testdata/images/tmp-writer/Dockerfile @@ -0,0 +1,6 @@ +FROM mikefarah/yq:4.34.2 + +COPY --chmod=+x writer.sh /bin +USER root + +ENTRYPOINT ["/bin/writer.sh"] \ No newline at end of file diff --git a/test/e2e/testdata/images/tmp-writer/writer.sh b/test/e2e/testdata/images/tmp-writer/writer.sh new file mode 100755 index 000000000..08ef0ea3c --- /dev/null +++ b/test/e2e/testdata/images/tmp-writer/writer.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +touch "/tmp/foo.txt" || exit 1 + +yq '(.desired.resources[] | .resource.metadata.labels) |= {"tmp-writer.xfn.crossplane.io": "true"} + .' diff --git a/test/e2e/testdata/kindConfig.yaml b/test/e2e/testdata/kindConfig.yaml new file mode 100644 index 000000000..90ee5b5e5 --- /dev/null +++ b/test/e2e/testdata/kindConfig.yaml @@ -0,0 +1,8 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + extraPortMappings: + # expose NodePort 32000 to the host + - containerPort: 32000 + hostPort: 3000 \ No newline at end of file diff --git a/test/e2e/utils/cert.go b/test/e2e/utils/cert.go new file mode 100644 index 000000000..d327fe80c --- /dev/null +++ b/test/e2e/utils/cert.go @@ -0,0 +1,84 @@ +/* +Copyright 2023 The Crossplane Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "time" +) + +// CreateCert create TLS certificate for given dns name +// and returns CA and key in PEM format, or an error +func CreateCert(dnsName string) (string, string, error) { + ca := &x509.Certificate{ + SerialNumber: big.NewInt(2019), + Subject: pkix.Name{ + Organization: []string{"Company, INC."}, + Country: []string{"US"}, + Province: []string{""}, + Locality: []string{""}, + StreetAddress: []string{""}, + PostalCode: []string{""}, + CommonName: dnsName, + }, + DNSNames: []string{dnsName}, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(10, 0, 0), + IsCA: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + } + + // create our private and public key + caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + return "", "", err + } + + // create the CA + caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey) + if err != nil { + return "", "", err + } + + // pem encode + caPEM := new(bytes.Buffer) + err = pem.Encode(caPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + }) + if err != nil { + return "", "", err + } + + keyPEM := new(bytes.Buffer) + err = pem.Encode(keyPEM, &pem.Block{ + Type: "PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(caPrivKey), + }) + if err != nil { + return "", "", err + } + + return caPEM.String(), keyPEM.String(), nil +} diff --git a/test/e2e/utils/utils.go b/test/e2e/utils/utils.go new file mode 100644 index 000000000..6112fd515 --- /dev/null +++ b/test/e2e/utils/utils.go @@ -0,0 +1,19 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package utils contains convenience functions for certification creations +// and for retrieving k8s objects and their field values +package utils diff --git a/test/e2e/xfn_test.go b/test/e2e/xfn_test.go new file mode 100644 index 000000000..be22147f6 --- /dev/null +++ b/test/e2e/xfn_test.go @@ -0,0 +1,266 @@ +/* +Copyright 2023 The Crossplane Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package e2e + +import ( + "context" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" + "sigs.k8s.io/e2e-framework/pkg/features" + "sigs.k8s.io/e2e-framework/third_party/helm" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + "github.com/crossplane/crossplane/test/e2e/funcs" + "github.com/crossplane/crossplane/test/e2e/utils" +) + +const ( + registryNs = "xfn-registry" + + timeoutFive = 5 * time.Minute + timeoutOne = 1 * time.Minute +) + +func TestXfnRunnerImagePull(t *testing.T) { + + manifests := "test/e2e/manifests/xfnrunner/private-registry/pull" + environment.Test(t, + features.New("PullFnImageFromPrivateRegistryWithCustomCert"). + WithLabel(LabelArea, "xfn"). + WithSetup("InstallRegistryWithCustomTlsCertificate", + funcs.AllOf( + funcs.AsFeaturesFunc(envfuncs.CreateNamespace(registryNs)), + func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context { + dnsName := "private-docker-registry.xfn-registry.svc.cluster.local" + caPem, keyPem, err := utils.CreateCert(dnsName) + if err != nil { + t.Fatal(err) + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reg-cert", + Namespace: registryNs, + }, + Type: corev1.SecretTypeTLS, + StringData: map[string]string{ + "tls.crt": caPem, + "tls.key": keyPem, + }, + } + client := config.Client().Resources() + if err := client.Create(ctx, secret); err != nil { + t.Fatalf("Cannot create secret %s: %v", secret.Name, err) + } + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reg-ca", + Namespace: namespace, + }, + Data: map[string]string{ + "domain.crt": caPem, + }, + } + if err := client.Create(ctx, configMap); err != nil { + t.Fatalf("Cannot create config %s: %v", configMap.Name, err) + } + return ctx + }, + + funcs.AsFeaturesFunc( + funcs.HelmRepo( + helm.WithArgs("add"), + helm.WithArgs("twuni"), + helm.WithArgs("https://helm.twun.io"), + )), + funcs.AsFeaturesFunc( + funcs.HelmInstall( + helm.WithName("private"), + helm.WithNamespace(registryNs), + helm.WithWait(), + helm.WithChart("twuni/docker-registry"), + helm.WithVersion("2.2.2"), + helm.WithArgs( + "--set service.type=NodePort", + "--set service.nodePort=32000", + "--set tlsSecretName=reg-cert", + ), + ))), + ). + WithSetup("CopyFnImageToRegistry", + funcs.CopyImageToRegistry(clusterName, registryNs, "private-docker-registry", "crossplane-e2e/fn-labelizer:latest", timeoutOne)). 
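+ // The image pushed above is the one the Composition below references; the Helm + // upgrade that follows points registryCaBundleConfig at the reg-ca ConfigMap, so + // xfn trusts the registry's self-signed certificate when pulling it.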
+ WithSetup("CrossplaneDeployedWithFunctionsEnabled", funcs.AllOf( + funcs.AsFeaturesFunc(funcs.HelmUpgrade( + HelmOptions( + helm.WithArgs( + "--set args={--debug,--enable-composition-functions}", + "--set xfn.enabled=true", + "--set xfn.args={--debug}", + "--set registryCaBundleConfig.name=reg-ca", + "--set registryCaBundleConfig.key=domain.crt", + "--set xfn.resources.requests.cpu=100m", + "--set xfn.resources.limits.cpu=100m", + ), + helm.WithWait())...)), + funcs.ReadyToTestWithin(1*time.Minute, namespace), + )). + WithSetup("ProviderNopDeployed", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "prerequisites/provider.yaml"), + funcs.ApplyResources(FieldManager, manifests, "prerequisites/definition.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/provider.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/definition.yaml"), + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/definition.yaml", v1.WatchingComposite()), + )). + Assess("CompositionWithFunctionIsCreated", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "composition.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "composition.yaml"), + )). + Assess("ClaimIsCreated", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "claim.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "claim.yaml"), + )). + Assess("ClaimBecomesAvailable", funcs.ResourcesHaveConditionWithin(timeoutFive, manifests, "claim.yaml", xpv1.Available())). + Assess("ManagedResourcesProcessedByFunction", funcs.ManagedResourcesOfClaimHaveFieldValueWithin(timeoutFive, manifests, "claim.yaml", "metadata.labels[labelizer.xfn.crossplane.io/processed]", "true", nil)). + WithTeardown("DeleteClaim", funcs.AllOf( + funcs.DeleteResources(manifests, "claim.yaml"), + funcs.ResourcesDeletedWithin(30*time.Second, manifests, "claim.yaml"), + )). + WithTeardown("DeleteComposition", funcs.AllOf( + funcs.DeleteResources(manifests, "composition.yaml"), + funcs.ResourcesDeletedWithin(30*time.Second, manifests, "composition.yaml"), + )). + WithTeardown("ProviderNopRemoved", funcs.AllOf( + funcs.DeleteResources(manifests, "prerequisites/provider.yaml"), + funcs.DeleteResources(manifests, "prerequisites/definition.yaml"), + funcs.ResourcesDeletedWithin(30*time.Second, manifests, "prerequisites/provider.yaml"), + funcs.ResourcesDeletedWithin(30*time.Second, manifests, "prerequisites/definition.yaml"), + )). + WithTeardown("RemoveRegistry", funcs.AllOf( + funcs.AsFeaturesFunc(envfuncs.DeleteNamespace(registryNs)), + func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context { + client := config.Client().Resources(namespace) + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reg-ca", + Namespace: namespace, + }, + } + err := client.Delete(ctx, configMap) + if err != nil { + t.Fatal(err) + } + return ctx + }, + )). + WithTeardown("CrossplaneDeployedWithoutFunctionsEnabled", funcs.AllOf( + funcs.AsFeaturesFunc(funcs.HelmUpgrade(HelmOptions()...)), + funcs.ReadyToTestWithin(1*time.Minute, namespace), + )). + Feature(), + ) +} + +func TestXfnRunnerWriteToTmp(t *testing.T) { + manifests := "test/e2e/manifests/xfnrunner/tmp-writer" + environment.Test(t, + features.New("CreateAFileInTmpFolder"). + WithLabel(LabelArea, "xfn"). 
+ WithSetup("InstallRegistry", + funcs.AllOf( + funcs.AsFeaturesFunc(envfuncs.CreateNamespace(registryNs)), + funcs.AsFeaturesFunc( + funcs.HelmRepo( + helm.WithArgs("add"), + helm.WithArgs("twuni"), + helm.WithArgs("https://helm.twun.io"), + )), + funcs.AsFeaturesFunc( + funcs.HelmInstall( + helm.WithName("public"), + helm.WithNamespace(registryNs), + helm.WithWait(), + helm.WithChart("twuni/docker-registry"), + helm.WithVersion("2.2.2"), + helm.WithArgs( + "--set service.type=NodePort", + "--set service.nodePort=32000", + ), + ))), + ). + WithSetup("CopyFnImageToRegistry", + funcs.CopyImageToRegistry(clusterName, registryNs, "public-docker-registry", "crossplane-e2e/fn-tmp-writer:latest", timeoutOne)). + WithSetup("CrossplaneDeployedWithFunctionsEnabled", funcs.AllOf( + funcs.AsFeaturesFunc(funcs.HelmUpgrade( + HelmOptions( + helm.WithArgs( + "--set args={--debug,--enable-composition-functions}", + "--set xfn.enabled=true", + "--set xfn.args={--debug}", + "--set xfn.resources.requests.cpu=100m", + "--set xfn.resources.limits.cpu=100m", + ), + helm.WithWait())...)), + funcs.ReadyToTestWithin(1*time.Minute, namespace), + )). + WithSetup("ProviderNopDeployed", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "prerequisites/provider.yaml"), + funcs.ApplyResources(FieldManager, manifests, "prerequisites/definition.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/provider.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/definition.yaml"), + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/definition.yaml", v1.WatchingComposite()), + )). + Assess("CompositionWithFunctionIsCreated", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "composition.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "composition.yaml"), + )). + Assess("ClaimIsCreated", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "claim.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "claim.yaml"), + )). + Assess("ClaimBecomesAvailable", + funcs.ResourcesHaveConditionWithin(timeoutFive, manifests, "claim.yaml", xpv1.Available())). + Assess("ManagedResourcesProcessedByFunction", + funcs.ManagedResourcesOfClaimHaveFieldValueWithin(timeoutFive, manifests, "claim.yaml", "metadata.labels[tmp-writer.xfn.crossplane.io]", "true", + funcs.FilterByGK(schema.GroupKind{Group: "nop.crossplane.io", Kind: "NopResource"}))). + WithTeardown("DeleteClaim", funcs.AllOf( + funcs.DeleteResources(manifests, "claim.yaml"), + funcs.ResourcesDeletedWithin(30*time.Second, manifests, "claim.yaml"), + )). + WithTeardown("DeleteComposition", funcs.AllOf( + funcs.DeleteResources(manifests, "composition.yaml"), + funcs.ResourcesDeletedWithin(30*time.Second, manifests, "composition.yaml"), + )). + WithTeardown("ProviderNopRemoved", funcs.AllOf( + funcs.DeleteResources(manifests, "prerequisites/provider.yaml"), + funcs.DeleteResources(manifests, "prerequisites/definition.yaml"), + funcs.ResourcesDeletedWithin(30*time.Second, manifests, "prerequisites/provider.yaml"), + funcs.ResourcesDeletedWithin(30*time.Second, manifests, "prerequisites/definition.yaml"), + )). + WithTeardown("RemoveRegistry", funcs.AsFeaturesFunc(envfuncs.DeleteNamespace(registryNs))). + WithTeardown("CrossplaneDeployedWithoutFunctionsEnabled", funcs.AllOf( + funcs.AsFeaturesFunc(funcs.HelmUpgrade(HelmOptions()...)), + funcs.ReadyToTestWithin(1*time.Minute, namespace), + )). + Feature(), + ) +}